mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-22 06:18:14 +00:00)
fix llm_call_completed event serialization issue
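This change fixes event serialization failures when LLM events carry provider-native objects. BaseLLM._emit_call_started_event and BaseLLM._emit_call_completed_event now pass messages, tools, and responses through crewai.utilities.serialization.to_serializable before emitting. Each provider completion (Anthropic, Azure, Bedrock, Gemini, OpenAI) now emits an LLMCallCompletedEvent with call_type=LLMCallType.TOOL_CALL before returning tool calls to the executor, to_serializable gains a fallback chain for BaseModel instances whose model_dump() raises, and leftover debug print statements in OpenAICompletion are removed.

A minimal, standalone sketch of the failure mode and the shape of the fix (class and helper names below are illustrative, not from the codebase):

import json

class ProviderToolCall:
    """Stand-in for an SDK-native tool-call object (illustrative only)."""
    def __init__(self, name: str, arguments: dict) -> None:
        self.name = name
        self.arguments = arguments

raw = [ProviderToolCall("get_weather", {"city": "Paris"})]

# Before: the event payload carried the raw objects, so serializing the event
# failed with "Object of type ProviderToolCall is not JSON serializable".
try:
    json.dumps(raw)
except TypeError as exc:
    print(exc)

# After: payloads go through to_serializable, which falls back to __dict__
# (and ultimately repr) for objects that are not natively JSON serializable.
def to_serializable_sketch(obj):  # simplified stand-in for the real helper
    try:
        return json.loads(json.dumps(obj))
    except TypeError:
        try:
            return {k: to_serializable_sketch(v) for k, v in obj.__dict__.items()}
        except Exception:
            return repr(obj)

print(json.dumps([to_serializable_sketch(c) for c in raw]))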
@@ -292,14 +292,16 @@ class BaseLLM(ABC):
         from_agent: Agent | None = None,
     ) -> None:
         """Emit LLM call started event."""
+        from crewai.utilities.serialization import to_serializable
+
         if not hasattr(crewai_event_bus, "emit"):
             raise ValueError("crewai_event_bus does not have an emit method") from None

         crewai_event_bus.emit(
             self,
             event=LLMCallStartedEvent(
-                messages=messages,
-                tools=tools,
+                messages=to_serializable(messages),
+                tools=to_serializable(tools),
                 callbacks=callbacks,
                 available_functions=available_functions,
                 from_task=from_task,
@@ -317,11 +319,13 @@ class BaseLLM(ABC):
         messages: str | list[LLMMessage] | None = None,
     ) -> None:
         """Emit LLM call completed event."""
+        from crewai.utilities.serialization import to_serializable
+
         crewai_event_bus.emit(
             self,
             event=LLMCallCompletedEvent(
-                messages=messages,
-                response=response,
+                messages=to_serializable(messages),
+                response=to_serializable(response),
                 call_type=call_type,
                 from_task=from_task,
                 from_agent=from_agent,
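Serialization now happens once at the emit boundary instead of at each call site. As a rough illustration of what to_serializable normalizes, assuming pydantic v2 style models similar to SDK tool-call objects (the model names here are made up):

from pydantic import BaseModel

class FunctionSpec(BaseModel):
    name: str
    arguments: str

class ToolCall(BaseModel):
    id: str
    type: str = "function"
    function: FunctionSpec

call = ToolCall(id="call_0", function=FunctionSpec(name="lookup", arguments="{}"))

# model_dump() yields plain dicts and strings, which is what the event payload
# needs; to_serializable applies this recursively, with a max_depth guard.
print(call.model_dump())
# {'id': 'call_0', 'type': 'function', 'function': {'name': 'lookup', 'arguments': '{}'}}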
@@ -589,6 +589,13 @@ class AnthropicCompletion(BaseLLM):
             # This allows the executor to manage tool execution with proper
             # message history and post-tool reasoning prompts
             if not available_functions:
+                self._emit_call_completed_event(
+                    response=list(tool_uses),
+                    call_type=LLMCallType.TOOL_CALL,
+                    from_task=from_task,
+                    from_agent=from_agent,
+                    messages=params["messages"],
+                )
                 return list(tool_uses)

             # Handle tool use conversation flow internally
@@ -1004,6 +1011,13 @@ class AnthropicCompletion(BaseLLM):
         if tool_uses:
             # If no available_functions, return tool calls for executor to handle
             if not available_functions:
+                self._emit_call_completed_event(
+                    response=list(tool_uses),
+                    call_type=LLMCallType.TOOL_CALL,
+                    from_task=from_task,
+                    from_agent=from_agent,
+                    messages=params["messages"],
+                )
                 return list(tool_uses)

             return await self._ahandle_tool_use_conversation(
@@ -628,6 +628,13 @@ class AzureCompletion(BaseLLM):
         # If there are tool_calls but no available_functions, return the tool_calls
         # This allows the caller (e.g., executor) to handle tool execution
         if message.tool_calls and not available_functions:
+            self._emit_call_completed_event(
+                response=list(message.tool_calls),
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
             return list(message.tool_calls)

         # Handle tool calls
@@ -804,7 +811,7 @@ class AzureCompletion(BaseLLM):
         # If there are tool_calls but no available_functions, return them
         # in OpenAI-compatible format for executor to handle
         if tool_calls and not available_functions:
-            return [
+            formatted_tool_calls = [
                 {
                     "id": call_data.get("id", f"call_{idx}"),
                     "type": "function",
@@ -815,6 +822,14 @@ class AzureCompletion(BaseLLM):
                 }
                 for idx, call_data in tool_calls.items()
             ]
+            self._emit_call_completed_event(
+                response=formatted_tool_calls,
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
+            return formatted_tool_calls

         # Handle completed tool calls
         if tool_calls and available_functions:
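The middle of the dict literal (the function payload) falls in the gap between the two Azure hunks above. A hedged reconstruction of one complete entry in formatted_tool_calls, with the elided keys inferred from the OpenAI tool-call convention rather than read from this diff:

# Hypothetical complete entry; only "id" and "type" appear in the hunks, the
# "function" block is an assumption based on the OpenAI-compatible format.
example_tool_call = {
    "id": "call_0",
    "type": "function",
    "function": {
        "name": "get_weather",
        "arguments": '{"city": "Paris"}',
    },
}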
@@ -546,6 +546,18 @@ class BedrockCompletion(BaseLLM):
                 "I apologize, but I received an empty response. Please try again."
             )

+        # If there are tool uses but no available_functions, return them for the executor to handle
+        tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
+        if tool_uses and not available_functions:
+            self._emit_call_completed_event(
+                response=tool_uses,
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=messages,
+            )
+            return tool_uses
+
         # Process content blocks and handle tool use correctly
         text_content = ""
@@ -935,6 +947,18 @@ class BedrockCompletion(BaseLLM):
                 "I apologize, but I received an empty response. Please try again."
             )

+        # If there are tool uses but no available_functions, return them for the executor to handle
+        tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
+        if tool_uses and not available_functions:
+            self._emit_call_completed_event(
+                response=tool_uses,
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=messages,
+            )
+            return tool_uses
+
         text_content = ""

         for content_block in content:
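For context, the block["toolUse"] extraction in both Bedrock hunks assumes Converse-style content blocks. A sketch of the shape being filtered, with field names following the AWS Bedrock Converse response format rather than anything shown in this diff:

# Assumed shape of a Bedrock Converse response content list; only blocks
# carrying a "toolUse" key are returned to the executor.
content = [
    {"text": "Let me check the weather."},
    {"toolUse": {"toolUseId": "t_0", "name": "get_weather", "input": {"city": "Paris"}}},
]
tool_uses = [block["toolUse"] for block in content if "toolUse" in block]
print(tool_uses)  # [{'toolUseId': 't_0', 'name': 'get_weather', 'input': {'city': 'Paris'}}]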
@@ -661,6 +661,13 @@ class GeminiCompletion(BaseLLM):
         # If there are function calls but no available_functions,
         # return them for the executor to handle (like OpenAI/Anthropic)
         if function_call_parts and not available_functions:
+            self._emit_call_completed_event(
+                response=function_call_parts,
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=self._convert_contents_to_dict(contents),
+            )
             return function_call_parts

         # Otherwise execute the tools internally
@@ -799,7 +806,7 @@ class GeminiCompletion(BaseLLM):
         # If there are function calls but no available_functions,
         # return them for the executor to handle
         if function_calls and not available_functions:
-            return [
+            formatted_function_calls = [
                 {
                     "id": call_data["id"],
                     "function": {
@@ -810,6 +817,14 @@ class GeminiCompletion(BaseLLM):
                 }
                 for call_data in function_calls.values()
             ]
+            self._emit_call_completed_event(
+                response=formatted_function_calls,
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=self._convert_contents_to_dict(contents),
+            )
+            return formatted_function_calls

         # Handle completed function calls
         if function_calls and available_functions:
@@ -431,6 +431,13 @@ class OpenAICompletion(BaseLLM):
         # If there are tool_calls but no available_functions, return the tool_calls
         # This allows the caller (e.g., executor) to handle tool execution
         if message.tool_calls and not available_functions:
+            self._emit_call_completed_event(
+                response=list(message.tool_calls),
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
             return list(message.tool_calls)

         # If there are tool_calls and available_functions, execute the tools
@@ -734,9 +741,13 @@ class OpenAICompletion(BaseLLM):
         # If there are tool_calls but no available_functions, return the tool_calls
         # This allows the caller (e.g., executor) to handle tool execution
         if message.tool_calls and not available_functions:
-            print("--------------------------------")
-            print("lorenze tool_calls", list(message.tool_calls))
-            print("--------------------------------")
+            self._emit_call_completed_event(
+                response=list(message.tool_calls),
+                call_type=LLMCallType.TOOL_CALL,
+                from_task=from_task,
+                from_agent=from_agent,
+                messages=params["messages"],
+            )
             return list(message.tool_calls)

         # If there are tool_calls and available_functions, execute the tools
@@ -66,11 +66,23 @@ def to_serializable(
            if key not in exclude
        }
    if isinstance(obj, BaseModel):
-        return to_serializable(
-            obj=obj.model_dump(exclude=exclude),
-            max_depth=max_depth,
-            _current_depth=_current_depth + 1,
-        )
+        try:
+            return to_serializable(
+                obj=obj.model_dump(exclude=exclude),
+                max_depth=max_depth,
+                _current_depth=_current_depth + 1,
+            )
+        except Exception:
+            try:
+                return {
+                    _to_serializable_key(k): to_serializable(
+                        v, max_depth=max_depth, _current_depth=_current_depth + 1
+                    )
+                    for k, v in obj.__dict__.items()
+                    if k not in (exclude or set())
+                }
+            except Exception:
+                return repr(obj)
    return repr(obj)
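A standalone sketch of the new fallback ladder for BaseModel instances: model_dump() first, then __dict__, then repr(). The model below is contrived so its dump always raises (pydantic v2 assumed; the recursive calls of the real helper are elided):

from typing import Any
from pydantic import BaseModel, field_serializer

class Flaky(BaseModel):
    value: Any

    @field_serializer("value")
    def _ser(self, v: Any) -> Any:
        # Contrived: force model_dump() to raise so the fallback path runs.
        raise RuntimeError("cannot serialize")

obj = Flaky(value=object())

# Mirrors the try/except ladder added above: if model_dump() fails, walk
# __dict__ instead; if that also fails, fall back to repr(obj).
try:
    payload = obj.model_dump()
except Exception:
    try:
        payload = dict(obj.__dict__)
    except Exception:
        payload = repr(obj)

print(payload)  # {'value': <object object at 0x...>}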