Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-28 17:48:13 +00:00
Compare commits
4 Commits
lorenze/im...gl/fix/goo
| Author | SHA1 | Date |
|---|---|---|
|  | 9ad2a9271f |  |
|  | 1e27cf3f0f |  |
|  | cba6d2eb9b |  |
|  | 381ad3a9a8 |  |
@@ -152,4 +152,4 @@ __all__ = [
     "wrap_file_source",
 ]
 
-__version__ = "1.9.0"
+__version__ = "1.9.1"
@@ -12,7 +12,7 @@ dependencies = [
     "pytube~=15.0.0",
     "requests~=2.32.5",
     "docker~=7.1.0",
-    "crewai==1.9.0",
+    "crewai==1.9.1",
     "lancedb~=0.5.4",
     "tiktoken~=0.8.0",
     "beautifulsoup4~=4.13.4",
@@ -291,4 +291,4 @@ __all__ = [
     "ZapierActionTools",
 ]
 
-__version__ = "1.9.0"
+__version__ = "1.9.1"
@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
 
 [project.optional-dependencies]
 tools = [
-    "crewai-tools==1.9.0",
+    "crewai-tools==1.9.1",
 ]
 embeddings = [
     "tiktoken~=0.8.0"
@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
 
 _suppress_pydantic_deprecation_warnings()
 
-__version__ = "1.9.0"
+__version__ = "1.9.1"
 _telemetry_submitted = False
 
 
@@ -37,7 +37,8 @@ class CrewAgentExecutorMixin:
             self.crew
             and self.agent
            and self.task
-            and f"Action: {sanitize_tool_name('Delegate work to coworker')}" not in output.text
+            and f"Action: {sanitize_tool_name('Delegate work to coworker')}"
+            not in output.text
         ):
             try:
                 if (
@@ -132,10 +133,11 @@ class CrewAgentExecutorMixin:
             and self.crew._long_term_memory
             and self.crew._entity_memory is None
         ):
-            self._printer.print(
-                content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
-                color="bold_yellow",
-            )
+            if self.agent and self.agent.verbose:
+                self._printer.print(
+                    content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
+                    color="bold_yellow",
+                )
 
     def _ask_human_input(self, final_answer: str) -> str:
         """Prompt human input with mode-appropriate messaging.
@@ -206,13 +206,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         try:
             formatted_answer = self._invoke_loop()
         except AssertionError:
-            self._printer.print(
-                content="Agent failed to reach a final answer. This is likely a bug - please report it.",
-                color="red",
-            )
+            if self.agent.verbose:
+                self._printer.print(
+                    content="Agent failed to reach a final answer. This is likely a bug - please report it.",
+                    color="red",
+                )
             raise
         except Exception as e:
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise
 
         if self.ask_for_human_input:
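The pattern running through the executor hunks in this compare: diagnostics that previously printed unconditionally are now gated behind the agent's verbose flag, and handle_unknown_error gains a verbose keyword. A minimal sketch of the gating, with printer standing in for crewAI's internal Printer (illustrative, not the exact internal API):

# Minimal sketch of the verbose-gating pattern adopted in these hunks.
# `printer` is a hypothetical stand-in for crewAI's internal Printer object;
# the message text is taken from the hunks above.
def report_assertion_failure(printer, verbose: bool) -> None:
    if verbose:  # before this change, the print ran unconditionally
        printer.print(
            content="Agent failed to reach a final answer. This is likely a bug - please report it.",
            color="red",
        )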
@@ -327,6 +328,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     messages=self.messages,
                     llm=self.llm,
                     callbacks=self.callbacks,
+                    verbose=self.agent.verbose,
                 )
                 break
 
@@ -341,6 +343,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     from_agent=self.agent,
                     response_model=self.response_model,
                     executor_context=self,
+                    verbose=self.agent.verbose,
                 )
                 # breakpoint()
                 if self.response_model is not None:
@@ -399,6 +402,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         iterations=self.iterations,
                         log_error_after=self.log_error_after,
                         printer=self._printer,
+                        verbose=self.agent.verbose,
                     )
 
             except Exception as e:
@@ -413,9 +417,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         llm=self.llm,
                         callbacks=self.callbacks,
                         i18n=self._i18n,
+                        verbose=self.agent.verbose,
                     )
                     continue
-                handle_unknown_error(self._printer, e)
+                handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
                 raise e
             finally:
                 self.iterations += 1
@@ -461,6 +466,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 messages=self.messages,
                 llm=self.llm,
                 callbacks=self.callbacks,
+                verbose=self.agent.verbose,
             )
         self._show_logs(formatted_answer)
         return formatted_answer
@@ -482,6 +488,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             from_agent=self.agent,
             response_model=self.response_model,
             executor_context=self,
+            verbose=self.agent.verbose,
         )
 
         # Check if the response is a list of tool calls
@@ -535,9 +542,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     llm=self.llm,
                     callbacks=self.callbacks,
                     i18n=self._i18n,
+                    verbose=self.agent.verbose,
                 )
                 continue
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise e
         finally:
             self.iterations += 1
@@ -559,6 +567,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             from_agent=self.agent,
             response_model=self.response_model,
             executor_context=self,
+            verbose=self.agent.verbose,
         )
 
         formatted_answer = AgentFinish(
@@ -755,10 +764,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         track_delegation_if_needed(func_name, args_dict, self.task)
 
         # Find the structured tool for hook context
-        structured_tool = None
-        for tool in self.tools or []:
-            if sanitize_tool_name(tool.name) == func_name:
-                structured_tool = tool
+        structured_tool: CrewStructuredTool | None = None
+        for structured in self.tools or []:
+            if sanitize_tool_name(structured.name) == func_name:
+                structured_tool = structured
                 break
 
         # Execute before_tool_call hooks
@@ -779,10 +788,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     hook_blocked = True
                     break
             except Exception as hook_error:
-                self._printer.print(
-                    content=f"Error in before_tool_call hook: {hook_error}",
-                    color="red",
-                )
+                if self.agent.verbose:
+                    self._printer.print(
+                        content=f"Error in before_tool_call hook: {hook_error}",
+                        color="red",
+                    )
 
         # If hook blocked execution, set result and skip tool execution
         if hook_blocked:
@@ -848,15 +858,16 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         after_hooks = get_after_tool_call_hooks()
         try:
             for after_hook in after_hooks:
-                hook_result = after_hook(after_hook_context)
-                if hook_result is not None:
-                    result = hook_result
+                after_hook_result = after_hook(after_hook_context)
+                if after_hook_result is not None:
+                    result = after_hook_result
+                    after_hook_context.tool_result = result
         except Exception as hook_error:
-            self._printer.print(
-                content=f"Error in after_tool_call hook: {hook_error}",
-                color="red",
-            )
+            if self.agent.verbose:
+                self._printer.print(
+                    content=f"Error in after_tool_call hook: {hook_error}",
+                    color="red",
+                )
 
         # Emit tool usage finished event
         crewai_event_bus.emit(
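Two things change in the after-hook loop above: the result variable is renamed so it cannot collide with the before-hook loop's hook_result, and the context's tool_result is refreshed whenever a hook rewrites the result, so later hooks in the chain observe the latest value. A hedged sketch of the chaining behavior, with hook and context shapes assumed from the hunk:

# Sketch of chained after_tool_call hooks: each hook may replace the tool
# result by returning a non-None value, and the shared context is updated
# so the next hook sees the replacement. Names are assumed from the diff.
result = raw_tool_output  # whatever the tool originally returned
for after_hook in get_after_tool_call_hooks():
    after_hook_result = after_hook(after_hook_context)
    if after_hook_result is not None:
        result = after_hook_result
        after_hook_context.tool_result = result  # keep the chain consistent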
@@ -942,13 +953,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         try:
             formatted_answer = await self._ainvoke_loop()
         except AssertionError:
-            self._printer.print(
-                content="Agent failed to reach a final answer. This is likely a bug - please report it.",
-                color="red",
-            )
+            if self.agent.verbose:
+                self._printer.print(
+                    content="Agent failed to reach a final answer. This is likely a bug - please report it.",
+                    color="red",
+                )
             raise
         except Exception as e:
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise
 
         if self.ask_for_human_input:
@@ -999,6 +1011,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     messages=self.messages,
                     llm=self.llm,
                     callbacks=self.callbacks,
+                    verbose=self.agent.verbose,
                 )
                 break
 
@@ -1013,6 +1026,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     from_agent=self.agent,
                     response_model=self.response_model,
                     executor_context=self,
+                    verbose=self.agent.verbose,
                 )
 
                 if self.response_model is not None:
@@ -1070,6 +1084,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         iterations=self.iterations,
                         log_error_after=self.log_error_after,
                         printer=self._printer,
+                        verbose=self.agent.verbose,
                     )
 
             except Exception as e:
@@ -1083,9 +1098,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                         llm=self.llm,
                         callbacks=self.callbacks,
                         i18n=self._i18n,
+                        verbose=self.agent.verbose,
                     )
                     continue
-                handle_unknown_error(self._printer, e)
+                handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
                 raise e
             finally:
                 self.iterations += 1
@@ -1125,6 +1141,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 messages=self.messages,
                 llm=self.llm,
                 callbacks=self.callbacks,
+                verbose=self.agent.verbose,
             )
         self._show_logs(formatted_answer)
         return formatted_answer
@@ -1146,6 +1163,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             from_agent=self.agent,
             response_model=self.response_model,
             executor_context=self,
+            verbose=self.agent.verbose,
         )
         # Check if the response is a list of tool calls
         if (
@@ -1198,9 +1216,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     llm=self.llm,
                     callbacks=self.callbacks,
                     i18n=self._i18n,
+                    verbose=self.agent.verbose,
                 )
                 continue
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise e
         finally:
             self.iterations += 1
@@ -1222,6 +1241,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             from_agent=self.agent,
             response_model=self.response_model,
             executor_context=self,
+            verbose=self.agent.verbose,
         )
 
         formatted_answer = AgentFinish(
@@ -1339,10 +1359,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         )
 
         if train_iteration is None or not isinstance(train_iteration, int):
-            self._printer.print(
-                content="Invalid or missing train iteration. Cannot save training data.",
-                color="red",
-            )
+            if self.agent.verbose:
+                self._printer.print(
+                    content="Invalid or missing train iteration. Cannot save training data.",
+                    color="red",
+                )
             return
 
         training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
@@ -1362,13 +1383,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         if train_iteration in agent_training_data:
             agent_training_data[train_iteration]["improved_output"] = result.output
         else:
-            self._printer.print(
-                content=(
-                    f"No existing training data for agent {agent_id} and iteration "
-                    f"{train_iteration}. Cannot save improved output."
-                ),
-                color="red",
-            )
+            if self.agent.verbose:
+                self._printer.print(
+                    content=(
+                        f"No existing training data for agent {agent_id} and iteration "
+                        f"{train_iteration}. Cannot save improved output."
+                    ),
+                    color="red",
+                )
             return
 
         # Update the training data and save
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]==1.9.0"
+    "crewai[tools]==1.9.1"
 ]
 
 [project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]==1.9.0"
+    "crewai[tools]==1.9.1"
 ]
 
 [project.scripts]
@@ -1,7 +1,3 @@
-from typing import Annotated
-
-from pydantic import Field
-
 from crewai.events.types.a2a_events import (
     A2AAgentCardFetchedEvent,
     A2AArtifactReceivedEvent,
@@ -106,7 +102,7 @@ from crewai.events.types.tool_usage_events import (
 )
 
 
-EventTypes = Annotated[
+EventTypes = (
     A2AAgentCardFetchedEvent
     | A2AArtifactReceivedEvent
     | A2AAuthenticationFailedEvent
@@ -184,6 +180,5 @@ EventTypes = Annotated[
     | MCPConnectionFailedEvent
     | MCPToolExecutionStartedEvent
     | MCPToolExecutionCompletedEvent
-    | MCPToolExecutionFailedEvent,
-    Field(discriminator="type"),
-]
+    | MCPToolExecutionFailedEvent
+)
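Taken together with the import hunk above (which drops Annotated and Field), this replaces the Pydantic tagged union with a plain | union: the Field(discriminator="type") dispatch is gone, which is also why the event hunks below relax each event's type field from Literal[...] to str. A toy sketch of the two forms, with hypothetical FooEvent/BarEvent standing in for the real event classes:

# Toy sketch contrasting the two union styles; FooEvent/BarEvent are
# hypothetical stand-ins for the real event classes in this compare.
from typing import Annotated, Literal

from pydantic import BaseModel, Field


class FooEvent(BaseModel):
    type: Literal["foo"] = "foo"  # a discriminator must be a Literal


class BarEvent(BaseModel):
    type: Literal["bar"] = "bar"


# Before: a tagged union; when validated (e.g. via pydantic.TypeAdapter),
# Pydantic dispatches directly on the `type` field.
TaggedEventTypes = Annotated[FooEvent | BarEvent, Field(discriminator="type")]

# After (the style adopted here): a plain union. Members are tried in order,
# so `type` can be declared as a plain `str` with a default value.
PlainEventTypes = FooEvent | BarEvent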
@@ -73,7 +73,7 @@ class A2ADelegationStartedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_delegation_started"] = "a2a_delegation_started"
+    type: str = "a2a_delegation_started"
     endpoint: str
     task_description: str
     agent_id: str

@@ -106,7 +106,7 @@ class A2ADelegationCompletedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_delegation_completed"] = "a2a_delegation_completed"
+    type: str = "a2a_delegation_completed"
     status: str
     result: str | None = None
     error: str | None = None

@@ -140,7 +140,7 @@ class A2AConversationStartedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_conversation_started"] = "a2a_conversation_started"
+    type: str = "a2a_conversation_started"
     agent_id: str
     endpoint: str
     context_id: str | None = None

@@ -171,7 +171,7 @@ class A2AMessageSentEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_message_sent"] = "a2a_message_sent"
+    type: str = "a2a_message_sent"
     message: str
     turn_number: int
     context_id: str | None = None

@@ -203,7 +203,7 @@ class A2AResponseReceivedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_response_received"] = "a2a_response_received"
+    type: str = "a2a_response_received"
     response: str
     turn_number: int
     context_id: str | None = None

@@ -237,7 +237,7 @@ class A2AConversationCompletedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_conversation_completed"] = "a2a_conversation_completed"
+    type: str = "a2a_conversation_completed"
     status: Literal["completed", "failed"]
     final_result: str | None = None
     error: str | None = None

@@ -263,7 +263,7 @@ class A2APollingStartedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_polling_started"] = "a2a_polling_started"
+    type: str = "a2a_polling_started"
     task_id: str
     context_id: str | None = None
     polling_interval: float

@@ -286,7 +286,7 @@ class A2APollingStatusEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_polling_status"] = "a2a_polling_status"
+    type: str = "a2a_polling_status"
     task_id: str
     context_id: str | None = None
     state: str

@@ -309,7 +309,7 @@ class A2APushNotificationRegisteredEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_push_notification_registered"] = "a2a_push_notification_registered"
+    type: str = "a2a_push_notification_registered"
     task_id: str
     context_id: str | None = None
     callback_url: str

@@ -334,7 +334,7 @@ class A2APushNotificationReceivedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_push_notification_received"] = "a2a_push_notification_received"
+    type: str = "a2a_push_notification_received"
     task_id: str
     context_id: str | None = None
     state: str

@@ -359,7 +359,7 @@ class A2APushNotificationSentEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_push_notification_sent"] = "a2a_push_notification_sent"
+    type: str = "a2a_push_notification_sent"
     task_id: str
     context_id: str | None = None
     callback_url: str

@@ -381,7 +381,7 @@ class A2APushNotificationTimeoutEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_push_notification_timeout"] = "a2a_push_notification_timeout"
+    type: str = "a2a_push_notification_timeout"
     task_id: str
     context_id: str | None = None
     timeout_seconds: float

@@ -405,7 +405,7 @@ class A2AStreamingStartedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_streaming_started"] = "a2a_streaming_started"
+    type: str = "a2a_streaming_started"
     task_id: str | None = None
     context_id: str | None = None
     endpoint: str

@@ -434,7 +434,7 @@ class A2AStreamingChunkEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_streaming_chunk"] = "a2a_streaming_chunk"
+    type: str = "a2a_streaming_chunk"
     task_id: str | None = None
     context_id: str | None = None
     chunk: str

@@ -462,7 +462,7 @@ class A2AAgentCardFetchedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_agent_card_fetched"] = "a2a_agent_card_fetched"
+    type: str = "a2a_agent_card_fetched"
     endpoint: str
     a2a_agent_name: str | None = None
     agent_card: dict[str, Any] | None = None

@@ -486,7 +486,7 @@ class A2AAuthenticationFailedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_authentication_failed"] = "a2a_authentication_failed"
+    type: str = "a2a_authentication_failed"
     endpoint: str
     auth_type: str | None = None
     error: str

@@ -517,7 +517,7 @@ class A2AArtifactReceivedEvent(A2AEventBase):
         extensions: List of A2A extension URIs in use.
     """
 
-    type: Literal["a2a_artifact_received"] = "a2a_artifact_received"
+    type: str = "a2a_artifact_received"
     task_id: str
     artifact_id: str
     artifact_name: str | None = None

@@ -550,7 +550,7 @@ class A2AConnectionErrorEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_connection_error"] = "a2a_connection_error"
+    type: str = "a2a_connection_error"
     endpoint: str
     error: str
     error_type: str | None = None

@@ -571,7 +571,7 @@ class A2AServerTaskStartedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_server_task_started"] = "a2a_server_task_started"
+    type: str = "a2a_server_task_started"
     task_id: str
     context_id: str
     metadata: dict[str, Any] | None = None

@@ -587,7 +587,7 @@ class A2AServerTaskCompletedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_server_task_completed"] = "a2a_server_task_completed"
+    type: str = "a2a_server_task_completed"
     task_id: str
     context_id: str
     result: str

@@ -603,7 +603,7 @@ class A2AServerTaskCanceledEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_server_task_canceled"] = "a2a_server_task_canceled"
+    type: str = "a2a_server_task_canceled"
     task_id: str
     context_id: str
     metadata: dict[str, Any] | None = None

@@ -619,7 +619,7 @@ class A2AServerTaskFailedEvent(A2AEventBase):
         metadata: Custom A2A metadata key-value pairs.
     """
 
-    type: Literal["a2a_server_task_failed"] = "a2a_server_task_failed"
+    type: str = "a2a_server_task_failed"
     task_id: str
     context_id: str
     error: str

@@ -634,7 +634,7 @@ class A2AParallelDelegationStartedEvent(A2AEventBase):
         task_description: Description of the task being delegated.
     """
 
-    type: Literal["a2a_parallel_delegation_started"] = "a2a_parallel_delegation_started"
+    type: str = "a2a_parallel_delegation_started"
     endpoints: list[str]
     task_description: str
 

@@ -649,7 +649,7 @@ class A2AParallelDelegationCompletedEvent(A2AEventBase):
         results: Summary of results from each agent.
     """
 
-    type: Literal["a2a_parallel_delegation_completed"] = "a2a_parallel_delegation_completed"
+    type: str = "a2a_parallel_delegation_completed"
     endpoints: list[str]
     success_count: int
     failure_count: int
@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing import Any, Literal
+from collections.abc import Sequence
+from typing import Any
 
 from pydantic import ConfigDict, model_validator
 

@@ -17,9 +18,9 @@ class AgentExecutionStartedEvent(BaseEvent):
 
     agent: BaseAgent
     task: Any
-    tools: list[BaseTool | CrewStructuredTool] | None
+    tools: Sequence[BaseTool | CrewStructuredTool] | None
     task_prompt: str
-    type: Literal["agent_execution_started"] = "agent_execution_started"
+    type: str = "agent_execution_started"
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 

@@ -43,7 +44,7 @@ class AgentExecutionCompletedEvent(BaseEvent):
     agent: BaseAgent
     task: Any
     output: str
-    type: Literal["agent_execution_completed"] = "agent_execution_completed"
+    type: str = "agent_execution_completed"
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 

@@ -67,7 +68,7 @@ class AgentExecutionErrorEvent(BaseEvent):
     agent: BaseAgent
     task: Any
     error: str
-    type: Literal["agent_execution_error"] = "agent_execution_error"
+    type: str = "agent_execution_error"
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 

@@ -90,9 +91,9 @@ class LiteAgentExecutionStartedEvent(BaseEvent):
     """Event emitted when a LiteAgent starts executing"""
 
     agent_info: dict[str, Any]
-    tools: list[BaseTool | CrewStructuredTool] | None
+    tools: Sequence[BaseTool | CrewStructuredTool] | None
     messages: str | list[dict[str, str]]
-    type: Literal["lite_agent_execution_started"] = "lite_agent_execution_started"
+    type: str = "lite_agent_execution_started"
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 

@@ -102,7 +103,7 @@ class LiteAgentExecutionCompletedEvent(BaseEvent):
 
     agent_info: dict[str, Any]
     output: str
-    type: Literal["lite_agent_execution_completed"] = "lite_agent_execution_completed"
+    type: str = "lite_agent_execution_completed"
 
 
 class LiteAgentExecutionErrorEvent(BaseEvent):

@@ -110,7 +111,7 @@ class LiteAgentExecutionErrorEvent(BaseEvent):
 
     agent_info: dict[str, Any]
     error: str
-    type: Literal["lite_agent_execution_error"] = "lite_agent_execution_error"
+    type: str = "lite_agent_execution_error"
 
 
 # Agent Eval events

@@ -119,7 +120,7 @@ class AgentEvaluationStartedEvent(BaseEvent):
     agent_role: str
     task_id: str | None = None
     iteration: int
-    type: Literal["agent_evaluation_started"] = "agent_evaluation_started"
+    type: str = "agent_evaluation_started"
 
 
 class AgentEvaluationCompletedEvent(BaseEvent):

@@ -129,7 +130,7 @@ class AgentEvaluationCompletedEvent(BaseEvent):
     iteration: int
     metric_category: Any
     score: Any
-    type: Literal["agent_evaluation_completed"] = "agent_evaluation_completed"
+    type: str = "agent_evaluation_completed"
 
 
 class AgentEvaluationFailedEvent(BaseEvent):

@@ -138,4 +139,4 @@ class AgentEvaluationFailedEvent(BaseEvent):
     task_id: str | None = None
     iteration: int
     error: str
-    type: Literal["agent_evaluation_failed"] = "agent_evaluation_failed"
+    type: str = "agent_evaluation_failed"
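Alongside the type-field change, several fields here move from list[...] to collections.abc.Sequence[...]. Sequence is covariant and read-only, so a list of a concrete tool subclass (or a tuple) can be passed where Sequence[BaseTool | CrewStructuredTool] is expected, which list's invariance forbids. A small type-checker-oriented sketch, with MyTool as a hypothetical BaseTool subclass:

# Sketch of why Sequence is the friendlier annotation. MyTool is a
# hypothetical subclass; run a type checker such as mypy to see the
# difference (the code itself runs fine either way).
from collections.abc import Sequence


class BaseTool: ...


class MyTool(BaseTool): ...


def takes_list(tools: list[BaseTool]) -> None: ...


def takes_seq(tools: Sequence[BaseTool]) -> None: ...


my_tools: list[MyTool] = [MyTool()]
takes_seq(my_tools)   # OK: Sequence is covariant in its element type
takes_list(my_tools)  # static error: list is invariant, list[MyTool] is not list[BaseTool]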
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Literal
+from typing import TYPE_CHECKING, Any
 
 from crewai.events.base_events import BaseEvent
 

@@ -40,14 +40,14 @@ class CrewKickoffStartedEvent(CrewBaseEvent):
     """Event emitted when a crew starts execution"""
 
     inputs: dict[str, Any] | None
-    type: Literal["crew_kickoff_started"] = "crew_kickoff_started"
+    type: str = "crew_kickoff_started"
 
 
 class CrewKickoffCompletedEvent(CrewBaseEvent):
     """Event emitted when a crew completes execution"""
 
     output: Any
-    type: Literal["crew_kickoff_completed"] = "crew_kickoff_completed"
+    type: str = "crew_kickoff_completed"
     total_tokens: int = 0
 
 

@@ -55,7 +55,7 @@ class CrewKickoffFailedEvent(CrewBaseEvent):
     """Event emitted when a crew fails to complete execution"""
 
     error: str
-    type: Literal["crew_kickoff_failed"] = "crew_kickoff_failed"
+    type: str = "crew_kickoff_failed"
 
 
 class CrewTrainStartedEvent(CrewBaseEvent):

@@ -64,7 +64,7 @@ class CrewTrainStartedEvent(CrewBaseEvent):
     n_iterations: int
     filename: str
     inputs: dict[str, Any] | None
-    type: Literal["crew_train_started"] = "crew_train_started"
+    type: str = "crew_train_started"
 
 
 class CrewTrainCompletedEvent(CrewBaseEvent):

@@ -72,14 +72,14 @@ class CrewTrainCompletedEvent(CrewBaseEvent):
 
     n_iterations: int
     filename: str
-    type: Literal["crew_train_completed"] = "crew_train_completed"
+    type: str = "crew_train_completed"
 
 
 class CrewTrainFailedEvent(CrewBaseEvent):
     """Event emitted when a crew fails to complete training"""
 
     error: str
-    type: Literal["crew_train_failed"] = "crew_train_failed"
+    type: str = "crew_train_failed"
 
 
 class CrewTestStartedEvent(CrewBaseEvent):

@@ -88,20 +88,20 @@ class CrewTestStartedEvent(CrewBaseEvent):
     n_iterations: int
     eval_llm: str | Any | None
     inputs: dict[str, Any] | None
-    type: Literal["crew_test_started"] = "crew_test_started"
+    type: str = "crew_test_started"
 
 
 class CrewTestCompletedEvent(CrewBaseEvent):
     """Event emitted when a crew completes testing"""
 
-    type: Literal["crew_test_completed"] = "crew_test_completed"
+    type: str = "crew_test_completed"
 
 
 class CrewTestFailedEvent(CrewBaseEvent):
     """Event emitted when a crew fails to complete testing"""
 
     error: str
-    type: Literal["crew_test_failed"] = "crew_test_failed"
+    type: str = "crew_test_failed"
 
 
 class CrewTestResultEvent(CrewBaseEvent):

@@ -110,4 +110,4 @@ class CrewTestResultEvent(CrewBaseEvent):
     quality: float
     execution_duration: float
     model: str
-    type: Literal["crew_test_result"] = "crew_test_result"
+    type: str = "crew_test_result"
@@ -1,4 +1,4 @@
-from typing import Any, Literal
+from typing import Any
 
 from pydantic import BaseModel, ConfigDict
 

@@ -17,14 +17,14 @@ class FlowStartedEvent(FlowEvent):
 
     flow_name: str
     inputs: dict[str, Any] | None = None
-    type: Literal["flow_started"] = "flow_started"
+    type: str = "flow_started"
 
 
 class FlowCreatedEvent(FlowEvent):
     """Event emitted when a flow is created"""
 
     flow_name: str
-    type: Literal["flow_created"] = "flow_created"
+    type: str = "flow_created"
 
 
 class MethodExecutionStartedEvent(FlowEvent):

@@ -34,7 +34,7 @@ class MethodExecutionStartedEvent(FlowEvent):
     method_name: str
     state: dict[str, Any] | BaseModel
     params: dict[str, Any] | None = None
-    type: Literal["method_execution_started"] = "method_execution_started"
+    type: str = "method_execution_started"
 
 
 class MethodExecutionFinishedEvent(FlowEvent):

@@ -44,7 +44,7 @@ class MethodExecutionFinishedEvent(FlowEvent):
     method_name: str
     result: Any = None
     state: dict[str, Any] | BaseModel
-    type: Literal["method_execution_finished"] = "method_execution_finished"
+    type: str = "method_execution_finished"
 
 
 class MethodExecutionFailedEvent(FlowEvent):

@@ -53,7 +53,7 @@ class MethodExecutionFailedEvent(FlowEvent):
     flow_name: str
     method_name: str
     error: Exception
-    type: Literal["method_execution_failed"] = "method_execution_failed"
+    type: str = "method_execution_failed"
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
 

@@ -78,7 +78,7 @@ class MethodExecutionPausedEvent(FlowEvent):
     flow_id: str
     message: str
     emit: list[str] | None = None
-    type: Literal["method_execution_paused"] = "method_execution_paused"
+    type: str = "method_execution_paused"
 
 
 class FlowFinishedEvent(FlowEvent):

@@ -86,7 +86,7 @@ class FlowFinishedEvent(FlowEvent):
 
     flow_name: str
     result: Any | None = None
-    type: Literal["flow_finished"] = "flow_finished"
+    type: str = "flow_finished"
     state: dict[str, Any] | BaseModel
 
 

@@ -110,14 +110,14 @@ class FlowPausedEvent(FlowEvent):
     state: dict[str, Any] | BaseModel
     message: str
     emit: list[str] | None = None
-    type: Literal["flow_paused"] = "flow_paused"
+    type: str = "flow_paused"
 
 
 class FlowPlotEvent(FlowEvent):
     """Event emitted when a flow plot is created"""
 
     flow_name: str
-    type: Literal["flow_plot"] = "flow_plot"
+    type: str = "flow_plot"
 
 
 class HumanFeedbackRequestedEvent(FlowEvent):

@@ -138,7 +138,7 @@ class HumanFeedbackRequestedEvent(FlowEvent):
     output: Any
     message: str
     emit: list[str] | None = None
-    type: Literal["human_feedback_requested"] = "human_feedback_requested"
+    type: str = "human_feedback_requested"
 
 
 class HumanFeedbackReceivedEvent(FlowEvent):

@@ -157,4 +157,4 @@ class HumanFeedbackReceivedEvent(FlowEvent):
     method_name: str
     feedback: str
     outcome: str | None = None
-    type: Literal["human_feedback_received"] = "human_feedback_received"
+    type: str = "human_feedback_received"
@@ -1,4 +1,4 @@
-from typing import Any, Literal
+from typing import Any
 
 from crewai.events.base_events import BaseEvent
 

@@ -20,14 +20,14 @@ class KnowledgeEventBase(BaseEvent):
 class KnowledgeRetrievalStartedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge retrieval is started."""
 
-    type: Literal["knowledge_search_query_started"] = "knowledge_search_query_started"
+    type: str = "knowledge_search_query_started"
 
 
 class KnowledgeRetrievalCompletedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge retrieval is completed."""
 
     query: str
-    type: Literal["knowledge_search_query_completed"] = "knowledge_search_query_completed"
+    type: str = "knowledge_search_query_completed"
     retrieved_knowledge: str
 
 

@@ -35,13 +35,13 @@ class KnowledgeQueryStartedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge query is started."""
 
     task_prompt: str
-    type: Literal["knowledge_query_started"] = "knowledge_query_started"
+    type: str = "knowledge_query_started"
 
 
 class KnowledgeQueryFailedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge query fails."""
 
-    type: Literal["knowledge_query_failed"] = "knowledge_query_failed"
+    type: str = "knowledge_query_failed"
     error: str
 
 

@@ -49,12 +49,12 @@ class KnowledgeQueryCompletedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge query is completed."""
 
     query: str
-    type: Literal["knowledge_query_completed"] = "knowledge_query_completed"
+    type: str = "knowledge_query_completed"
 
 
 class KnowledgeSearchQueryFailedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge search query fails."""
 
     query: str
-    type: Literal["knowledge_search_query_failed"] = "knowledge_search_query_failed"
+    type: str = "knowledge_search_query_failed"
     error: str
@@ -1,5 +1,5 @@
 from enum import Enum
-from typing import Any, Literal
+from typing import Any
 
 from pydantic import BaseModel
 

@@ -42,7 +42,7 @@ class LLMCallStartedEvent(LLMEventBase):
         multimodal content (text, images, etc.)
     """
 
-    type: Literal["llm_call_started"] = "llm_call_started"
+    type: str = "llm_call_started"
     messages: str | list[dict[str, Any]] | None = None
     tools: list[dict[str, Any]] | None = None
     callbacks: list[Any] | None = None

@@ -52,7 +52,7 @@ class LLMCallStartedEvent(LLMEventBase):
 class LLMCallCompletedEvent(LLMEventBase):
     """Event emitted when a LLM call completes"""
 
-    type: Literal["llm_call_completed"] = "llm_call_completed"
+    type: str = "llm_call_completed"
     messages: str | list[dict[str, Any]] | None = None
     response: Any
     call_type: LLMCallType

@@ -62,7 +62,7 @@ class LLMCallFailedEvent(LLMEventBase):
     """Event emitted when a LLM call fails"""
 
     error: str
-    type: Literal["llm_call_failed"] = "llm_call_failed"
+    type: str = "llm_call_failed"
 
 
 class FunctionCall(BaseModel):

@@ -80,7 +80,7 @@ class ToolCall(BaseModel):
 class LLMStreamChunkEvent(LLMEventBase):
     """Event emitted when a streaming chunk is received"""
 
-    type: Literal["llm_stream_chunk"] = "llm_stream_chunk"
+    type: str = "llm_stream_chunk"
     chunk: str
     tool_call: ToolCall | None = None
     call_type: LLMCallType | None = None
@@ -1,6 +1,6 @@
 from collections.abc import Callable
 from inspect import getsource
-from typing import Any, Literal
+from typing import Any
 
 from crewai.events.base_events import BaseEvent
 

@@ -27,7 +27,7 @@ class LLMGuardrailStartedEvent(LLMGuardrailBaseEvent):
         retry_count: The number of times the guardrail has been retried
     """
 
-    type: Literal["llm_guardrail_started"] = "llm_guardrail_started"
+    type: str = "llm_guardrail_started"
     guardrail: str | Callable
     retry_count: int
 

@@ -53,7 +53,7 @@ class LLMGuardrailCompletedEvent(LLMGuardrailBaseEvent):
         retry_count: The number of times the guardrail has been retried
     """
 
-    type: Literal["llm_guardrail_completed"] = "llm_guardrail_completed"
+    type: str = "llm_guardrail_completed"
     success: bool
     result: Any
     error: str | None = None

@@ -68,6 +68,6 @@ class LLMGuardrailFailedEvent(LLMGuardrailBaseEvent):
         retry_count: The number of times the guardrail has been retried
     """
 
-    type: Literal["llm_guardrail_failed"] = "llm_guardrail_failed"
+    type: str = "llm_guardrail_failed"
     error: str
     retry_count: int
@@ -1,6 +1,6 @@
 """Agent logging events that don't reference BaseAgent to avoid circular imports."""
 
-from typing import Any, Literal
+from typing import Any
 
 from pydantic import ConfigDict
 

@@ -13,7 +13,7 @@ class AgentLogsStartedEvent(BaseEvent):
     agent_role: str
     task_description: str | None = None
     verbose: bool = False
-    type: Literal["agent_logs_started"] = "agent_logs_started"
+    type: str = "agent_logs_started"
 
 
 class AgentLogsExecutionEvent(BaseEvent):

@@ -22,6 +22,6 @@ class AgentLogsExecutionEvent(BaseEvent):
     agent_role: str
     formatted_answer: Any
     verbose: bool = False
-    type: Literal["agent_logs_execution"] = "agent_logs_execution"
+    type: str = "agent_logs_execution"
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -1,5 +1,5 @@
 from datetime import datetime
-from typing import Any, Literal
+from typing import Any
 
 from crewai.events.base_events import BaseEvent
 

@@ -24,7 +24,7 @@ class MCPEvent(BaseEvent):
 class MCPConnectionStartedEvent(MCPEvent):
     """Event emitted when starting to connect to an MCP server."""
 
-    type: Literal["mcp_connection_started"] = "mcp_connection_started"
+    type: str = "mcp_connection_started"
     connect_timeout: int | None = None
     is_reconnect: bool = (
         False  # True if this is a reconnection, False for first connection

@@ -34,7 +34,7 @@ class MCPConnectionStartedEvent(MCPEvent):
 class MCPConnectionCompletedEvent(MCPEvent):
     """Event emitted when successfully connected to an MCP server."""
 
-    type: Literal["mcp_connection_completed"] = "mcp_connection_completed"
+    type: str = "mcp_connection_completed"
     started_at: datetime | None = None
     completed_at: datetime | None = None
     connection_duration_ms: float | None = None

@@ -46,7 +46,7 @@ class MCPConnectionFailedEvent(MCPEvent):
     """Event emitted when connection to an MCP server fails."""
 
-    type: Literal["mcp_connection_failed"] = "mcp_connection_failed"
+    type: str = "mcp_connection_failed"
     error: str
     error_type: str | None = None  # "timeout", "authentication", "network", etc.
     started_at: datetime | None = None

@@ -56,7 +56,7 @@ class MCPConnectionFailedEvent(MCPEvent):
 class MCPToolExecutionStartedEvent(MCPEvent):
     """Event emitted when starting to execute an MCP tool."""
 
-    type: Literal["mcp_tool_execution_started"] = "mcp_tool_execution_started"
+    type: str = "mcp_tool_execution_started"
     tool_name: str
     tool_args: dict[str, Any] | None = None
 

@@ -64,7 +64,7 @@ class MCPToolExecutionStartedEvent(MCPEvent):
 class MCPToolExecutionCompletedEvent(MCPEvent):
     """Event emitted when MCP tool execution completes."""
 
-    type: Literal["mcp_tool_execution_completed"] = "mcp_tool_execution_completed"
+    type: str = "mcp_tool_execution_completed"
     tool_name: str
     tool_args: dict[str, Any] | None = None
     result: Any | None = None

@@ -76,7 +76,7 @@ class MCPToolExecutionCompletedEvent(MCPEvent):
 class MCPToolExecutionFailedEvent(MCPEvent):
     """Event emitted when MCP tool execution fails."""
 
-    type: Literal["mcp_tool_execution_failed"] = "mcp_tool_execution_failed"
+    type: str = "mcp_tool_execution_failed"
     tool_name: str
     tool_args: dict[str, Any] | None = None
     error: str
@@ -1,4 +1,4 @@
-from typing import Any, Literal
+from typing import Any
 
 from crewai.events.base_events import BaseEvent
 

@@ -23,7 +23,7 @@ class MemoryBaseEvent(BaseEvent):
 class MemoryQueryStartedEvent(MemoryBaseEvent):
     """Event emitted when a memory query is started"""
 
-    type: Literal["memory_query_started"] = "memory_query_started"
+    type: str = "memory_query_started"
     query: str
     limit: int
     score_threshold: float | None = None

@@ -32,7 +32,7 @@ class MemoryQueryStartedEvent(MemoryBaseEvent):
 class MemoryQueryCompletedEvent(MemoryBaseEvent):
     """Event emitted when a memory query is completed successfully"""
 
-    type: Literal["memory_query_completed"] = "memory_query_completed"
+    type: str = "memory_query_completed"
     query: str
     results: Any
     limit: int

@@ -43,7 +43,7 @@ class MemoryQueryCompletedEvent(MemoryBaseEvent):
 class MemoryQueryFailedEvent(MemoryBaseEvent):
     """Event emitted when a memory query fails"""
 
-    type: Literal["memory_query_failed"] = "memory_query_failed"
+    type: str = "memory_query_failed"
     query: str
     limit: int
     score_threshold: float | None = None

@@ -53,7 +53,7 @@ class MemoryQueryFailedEvent(MemoryBaseEvent):
 class MemorySaveStartedEvent(MemoryBaseEvent):
     """Event emitted when a memory save operation is started"""
 
-    type: Literal["memory_save_started"] = "memory_save_started"
+    type: str = "memory_save_started"
     value: str | None = None
     metadata: dict[str, Any] | None = None
     agent_role: str | None = None

@@ -62,7 +62,7 @@ class MemorySaveStartedEvent(MemoryBaseEvent):
 class MemorySaveCompletedEvent(MemoryBaseEvent):
     """Event emitted when a memory save operation is completed successfully"""
 
-    type: Literal["memory_save_completed"] = "memory_save_completed"
+    type: str = "memory_save_completed"
     value: str
     metadata: dict[str, Any] | None = None
     agent_role: str | None = None

@@ -72,7 +72,7 @@ class MemorySaveCompletedEvent(MemoryBaseEvent):
 class MemorySaveFailedEvent(MemoryBaseEvent):
     """Event emitted when a memory save operation fails"""
 
-    type: Literal["memory_save_failed"] = "memory_save_failed"
+    type: str = "memory_save_failed"
     value: str | None = None
     metadata: dict[str, Any] | None = None
     agent_role: str | None = None

@@ -82,14 +82,14 @@ class MemorySaveFailedEvent(MemoryBaseEvent):
 class MemoryRetrievalStartedEvent(MemoryBaseEvent):
     """Event emitted when memory retrieval for a task prompt starts"""
 
-    type: Literal["memory_retrieval_started"] = "memory_retrieval_started"
+    type: str = "memory_retrieval_started"
     task_id: str | None = None
 
 
 class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
     """Event emitted when memory retrieval for a task prompt completes successfully"""
 
-    type: Literal["memory_retrieval_completed"] = "memory_retrieval_completed"
+    type: str = "memory_retrieval_completed"
     task_id: str | None = None
     memory_content: str
     retrieval_time_ms: float

@@ -98,6 +98,6 @@ class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
 class MemoryRetrievalFailedEvent(MemoryBaseEvent):
     """Event emitted when memory retrieval for a task prompt fails."""
 
-    type: Literal["memory_retrieval_failed"] = "memory_retrieval_failed"
+    type: str = "memory_retrieval_failed"
     task_id: str | None = None
     error: str
@@ -1,4 +1,4 @@
-from typing import Any, Literal
+from typing import Any
 
 from crewai.events.base_events import BaseEvent
 

@@ -24,7 +24,7 @@ class ReasoningEvent(BaseEvent):
 class AgentReasoningStartedEvent(ReasoningEvent):
     """Event emitted when an agent starts reasoning about a task."""
 
-    type: Literal["agent_reasoning_started"] = "agent_reasoning_started"
+    type: str = "agent_reasoning_started"
     agent_role: str
     task_id: str
 

@@ -32,7 +32,7 @@ class AgentReasoningStartedEvent(ReasoningEvent):
 class AgentReasoningCompletedEvent(ReasoningEvent):
     """Event emitted when an agent finishes its reasoning process."""
 
-    type: Literal["agent_reasoning_completed"] = "agent_reasoning_completed"
+    type: str = "agent_reasoning_completed"
     agent_role: str
     task_id: str
     plan: str

@@ -42,7 +42,7 @@ class AgentReasoningCompletedEvent(ReasoningEvent):
 class AgentReasoningFailedEvent(ReasoningEvent):
     """Event emitted when the reasoning process fails."""
 
-    type: Literal["agent_reasoning_failed"] = "agent_reasoning_failed"
+    type: str = "agent_reasoning_failed"
     agent_role: str
     task_id: str
     error: str
@@ -1,4 +1,4 @@
-from typing import Any, Literal
+from typing import Any
 
 from crewai.events.base_events import BaseEvent
 from crewai.tasks.task_output import TaskOutput

@@ -7,7 +7,7 @@ from crewai.tasks.task_output import TaskOutput
 class TaskStartedEvent(BaseEvent):
     """Event emitted when a task starts"""
 
-    type: Literal["task_started"] = "task_started"
+    type: str = "task_started"
     context: str | None
     task: Any | None = None
 

@@ -28,7 +28,7 @@ class TaskCompletedEvent(BaseEvent):
     """Event emitted when a task completes"""
 
     output: TaskOutput
-    type: Literal["task_completed"] = "task_completed"
+    type: str = "task_completed"
     task: Any | None = None
 
     def __init__(self, **data):

@@ -48,7 +48,7 @@ class TaskFailedEvent(BaseEvent):
     """Event emitted when a task fails"""
 
     error: str
-    type: Literal["task_failed"] = "task_failed"
+    type: str = "task_failed"
     task: Any | None = None
 
     def __init__(self, **data):

@@ -67,7 +67,7 @@ class TaskFailedEvent(BaseEvent):
 class TaskEvaluationEvent(BaseEvent):
     """Event emitted when a task evaluation is completed"""
 
-    type: Literal["task_evaluation"] = "task_evaluation"
+    type: str = "task_evaluation"
     evaluation_type: str
     task: Any | None = None
 
@@ -1,6 +1,6 @@
 from collections.abc import Callable
 from datetime import datetime
-from typing import Any, Literal
+from typing import Any
 
 from pydantic import ConfigDict
 

@@ -55,7 +55,7 @@ class ToolUsageEvent(BaseEvent):
 class ToolUsageStartedEvent(ToolUsageEvent):
     """Event emitted when a tool execution is started"""
 
-    type: Literal["tool_usage_started"] = "tool_usage_started"
+    type: str = "tool_usage_started"
 
 
 class ToolUsageFinishedEvent(ToolUsageEvent):

@@ -65,35 +65,35 @@ class ToolUsageFinishedEvent(ToolUsageEvent):
     finished_at: datetime
     from_cache: bool = False
     output: Any
-    type: Literal["tool_usage_finished"] = "tool_usage_finished"
+    type: str = "tool_usage_finished"
 
 
 class ToolUsageErrorEvent(ToolUsageEvent):
     """Event emitted when a tool execution encounters an error"""
 
     error: Any
-    type: Literal["tool_usage_error"] = "tool_usage_error"
+    type: str = "tool_usage_error"
 
 
 class ToolValidateInputErrorEvent(ToolUsageEvent):
     """Event emitted when a tool input validation encounters an error"""
 
     error: Any
-    type: Literal["tool_validate_input_error"] = "tool_validate_input_error"
+    type: str = "tool_validate_input_error"
 
 
 class ToolSelectionErrorEvent(ToolUsageEvent):
     """Event emitted when a tool selection encounters an error"""
 
     error: Any
-    type: Literal["tool_selection_error"] = "tool_selection_error"
+    type: str = "tool_selection_error"
 
 
 class ToolExecutionErrorEvent(BaseEvent):
     """Event emitted when a tool execution encounters an error"""
 
     error: Any
-    type: Literal["tool_execution_error"] = "tool_execution_error"
+    type: str = "tool_execution_error"
     tool_name: str
     tool_args: dict[str, Any]
     tool_class: Callable
@@ -341,6 +341,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 messages=list(self.state.messages),
                 llm=self.llm,
                 callbacks=self.callbacks,
+                verbose=self.agent.verbose,
             )
 
             self.state.current_answer = formatted_answer

@@ -366,6 +367,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 from_agent=self.agent,
                 response_model=None,
                 executor_context=self,
+                verbose=self.agent.verbose,
             )
 
             # Parse the LLM response

@@ -401,7 +403,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 return "context_error"
             if e.__class__.__module__.startswith("litellm"):
                 raise e
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise
 
     @listen("continue_reasoning_native")

@@ -436,6 +438,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 from_agent=self.agent,
                 response_model=None,
                 executor_context=self,
+                verbose=self.agent.verbose,
             )
 
             # Check if the response is a list of tool calls

@@ -474,7 +477,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 return "context_error"
             if e.__class__.__module__.startswith("litellm"):
                 raise e
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise
 
     @router(call_llm_and_parse)

@@ -670,10 +673,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
 
         track_delegation_if_needed(func_name, args_dict, self.task)
 
-        structured_tool = None
-        for tool in self.tools or []:
-            if sanitize_tool_name(tool.name) == func_name:
-                structured_tool = tool
+        structured_tool: CrewStructuredTool | None = None
+        for structured in self.tools or []:
+            if sanitize_tool_name(structured.name) == func_name:
+                structured_tool = structured
                 break
 
         hook_blocked = False

@@ -693,10 +696,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                     hook_blocked = True
                     break
             except Exception as hook_error:
-                self._printer.print(
-                    content=f"Error in before_tool_call hook: {hook_error}",
-                    color="red",
-                )
+                if self.agent.verbose:
+                    self._printer.print(
+                        content=f"Error in before_tool_call hook: {hook_error}",
+                        color="red",
+                    )
 
         if hook_blocked:
             result = f"Tool execution blocked by hook. Tool: {func_name}"

@@ -758,15 +762,16 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
         after_hooks = get_after_tool_call_hooks()
         try:
             for after_hook in after_hooks:
-                hook_result = after_hook(after_hook_context)
-                if hook_result is not None:
-                    result = hook_result
+                after_hook_result = after_hook(after_hook_context)
+                if after_hook_result is not None:
+                    result = after_hook_result
+                    after_hook_context.tool_result = result
         except Exception as hook_error:
-            self._printer.print(
-                content=f"Error in after_tool_call hook: {hook_error}",
-                color="red",
-            )
+            if self.agent.verbose:
+                self._printer.print(
+                    content=f"Error in after_tool_call hook: {hook_error}",
+                    color="red",
+                )
 
         # Emit tool usage finished event
         crewai_event_bus.emit(

@@ -911,6 +916,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 iterations=self.state.iterations,
                 log_error_after=self.log_error_after,
                 printer=self._printer,
+                verbose=self.agent.verbose,
             )
 
         if formatted_answer:

@@ -930,6 +936,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
                 llm=self.llm,
                 callbacks=self.callbacks,
                 i18n=self._i18n,
+                verbose=self.agent.verbose,
             )
 
         self.state.iterations += 1

@@ -1021,7 +1028,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             self._console.print(fail_text)
             raise
         except Exception as e:
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise
         finally:
             self._is_executing = False

@@ -1106,7 +1113,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
             self._console.print(fail_text)
             raise
         except Exception as e:
-            handle_unknown_error(self._printer, e)
+            handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
             raise
         finally:
             self._is_executing = False
@@ -7,7 +7,7 @@ for building event-driven workflows with conditional execution and routing.
 from __future__ import annotations
 
 import asyncio
-from collections.abc import Callable
+from collections.abc import Callable, Sequence
 from concurrent.futures import Future
 import copy
 import inspect

@@ -2382,7 +2382,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         message: str,
         output: Any,
         metadata: dict[str, Any] | None = None,
-        emit: list[str] | None = None,
+        emit: Sequence[str] | None = None,
     ) -> str:
         """Request feedback from a human.
         Args:

@@ -2453,7 +2453,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
     def _collapse_to_outcome(
         self,
         feedback: str,
-        outcomes: list[str],
+        outcomes: Sequence[str],
         llm: str | BaseLLM,
     ) -> str:
         """Collapse free-form feedback to a predefined outcome using LLM.
@@ -53,7 +53,7 @@ Example (asynchronous with custom provider):
 from __future__ import annotations
 
 import asyncio
-from collections.abc import Callable
+from collections.abc import Callable, Sequence
 from dataclasses import dataclass, field
 from datetime import datetime
 from functools import wraps

@@ -128,7 +128,7 @@ class HumanFeedbackConfig:
     """
 
     message: str
-    emit: list[str] | None = None
+    emit: Sequence[str] | None = None
     llm: str | BaseLLM | None = None
     default_outcome: str | None = None
     metadata: dict[str, Any] | None = None

@@ -154,7 +154,7 @@ class HumanFeedbackMethod(FlowMethod[Any, Any]):
 
 def human_feedback(
     message: str,
-    emit: list[str] | None = None,
+    emit: Sequence[str] | None = None,
     llm: str | BaseLLM | None = None,
     default_outcome: str | None = None,
     metadata: dict[str, Any] | None = None,
@@ -118,17 +118,20 @@ class PersistenceDecorator:
)
except Exception as e:
error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
cls._printer.print(error_msg, color="red")
if verbose:
cls._printer.print(error_msg, color="red")
logger.error(error_msg)
raise RuntimeError(f"State persistence failed: {e!s}") from e
except AttributeError as e:
error_msg = LOG_MESSAGES["state_missing"]
cls._printer.print(error_msg, color="red")
if verbose:
cls._printer.print(error_msg, color="red")
logger.error(error_msg)
raise ValueError(error_msg) from e
except (TypeError, ValueError) as e:
error_msg = LOG_MESSAGES["id_missing"]
cls._printer.print(error_msg, color="red")
if verbose:
cls._printer.print(error_msg, color="red")
logger.error(error_msg)
raise ValueError(error_msg) from e


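The `verbose` consulted above is threaded in from the persistence decorator, so a flow can opt out of persistence error printing while the `RuntimeError`/`ValueError` escalation and `logger.error` calls stay intact. A sketch, assuming the decorator's existing `verbose` parameter (the flow body is illustrative):

from crewai.flow.flow import Flow, start
from crewai.flow.persistence import persist


@persist(verbose=False)  # assumption: persistence errors are logged, not printed
class QuietFlow(Flow[dict]):
    @start()
    def begin(self) -> str:
        return "done"


QuietFlow().kickoff()
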
@@ -151,7 +151,9 @@ def _unwrap_function(function: Any) -> Any:
return function


def get_possible_return_constants(function: Any) -> list[str] | None:
def get_possible_return_constants(
function: Any, verbose: bool = True
) -> list[str] | None:
"""Extract possible string return values from a function using AST parsing.

This function analyzes the source code of a router method to identify
@@ -178,10 +180,11 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
# Can't get source code
return None
except Exception as e:
_printer.print(
f"Error retrieving source code for function {function.__name__}: {e}",
color="red",
)
if verbose:
_printer.print(
f"Error retrieving source code for function {function.__name__}: {e}",
color="red",
)
return None

try:
@@ -190,25 +193,28 @@ def get_possible_return_constants(function: Any) -> list[str] | None:
# Parse the source code into an AST
code_ast = ast.parse(source)
except IndentationError as e:
_printer.print(
f"IndentationError while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
if verbose:
_printer.print(
f"IndentationError while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
return None
except SyntaxError as e:
_printer.print(
f"SyntaxError while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
if verbose:
_printer.print(
f"SyntaxError while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
return None
except Exception as e:
_printer.print(
f"Unexpected error while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
if verbose:
_printer.print(
f"Unexpected error while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
return None

return_values: set[str] = set()
@@ -388,15 +394,17 @@ def get_possible_return_constants(function: Any) -> list[str] | None:

StateAttributeVisitor().visit(class_ast)
except Exception as e:
_printer.print(
f"Could not analyze class context for {function.__name__}: {e}",
color="yellow",
)
if verbose:
_printer.print(
f"Could not analyze class context for {function.__name__}: {e}",
color="yellow",
)
except Exception as e:
_printer.print(
f"Could not introspect class for {function.__name__}: {e}",
color="yellow",
)
if verbose:
_printer.print(
f"Could not introspect class for {function.__name__}: {e}",
color="yellow",
)

VariableAssignmentVisitor().visit(code_ast)
ReturnVisitor().visit(code_ast)

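For readers new to the technique whose logging is gated here: extracting a router's possible outcomes reduces to parsing the function's source with `ast` and collecting string constants from its `return` statements. A minimal, self-contained sketch of that core idea (the full implementation above additionally resolves variables and class attributes):

import ast
import inspect
import textwrap
from typing import Any


def string_return_constants(function: Any) -> list[str]:
    """Collect literal string values returned by *function*."""
    source = textwrap.dedent(inspect.getsource(function))
    tree = ast.parse(source)
    values: set[str] = set()
    for node in ast.walk(tree):
        # Only direct `return "literal"` statements are picked up here.
        if isinstance(node, ast.Return) and isinstance(node.value, ast.Constant):
            if isinstance(node.value.value, str):
                values.add(node.value.value)
    return sorted(values)


def router(flag: bool) -> str:
    if flag:
        return "approved"
    return "rejected"


print(string_return_constants(router))  # ['approved', 'rejected']
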
@@ -72,13 +72,13 @@ from crewai.utilities.agent_utils import (
from crewai.utilities.converter import (
Converter,
ConverterError,
generate_model_description,
)
from crewai.utilities.guardrail import process_guardrail
from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.tool_utils import execute_tool_and_check_finality
from crewai.utilities.types import LLMMessage
@@ -344,11 +344,12 @@ class LiteAgent(FlowTrackable, BaseModel):
)

except Exception as e:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
handle_unknown_error(self._printer, e)
if self.verbose:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
handle_unknown_error(self._printer, e, verbose=self.verbose)
# Emit error event
crewai_event_bus.emit(
self,
@@ -396,10 +397,11 @@ class LiteAgent(FlowTrackable, BaseModel):
if isinstance(result, BaseModel):
formatted_result = result
except ConverterError as e:
self._printer.print(
content=f"Failed to parse output into response format after retries: {e.message}",
color="yellow",
)
if self.verbose:
self._printer.print(
content=f"Failed to parse output into response format after retries: {e.message}",
color="yellow",
)

# Calculate token usage metrics
if isinstance(self.llm, BaseLLM):
@@ -605,6 +607,7 @@ class LiteAgent(FlowTrackable, BaseModel):
messages=self._messages,
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
verbose=self.verbose,
)

enforce_rpm_limit(self.request_within_rpm_limit)
@@ -617,6 +620,7 @@ class LiteAgent(FlowTrackable, BaseModel):
printer=self._printer,
from_agent=self,
executor_context=self,
verbose=self.verbose,
)

except Exception as e:
@@ -646,16 +650,18 @@ class LiteAgent(FlowTrackable, BaseModel):

self._append_message(formatted_answer.text, role="assistant")
except OutputParserError as e:  # noqa: PERF203
self._printer.print(
content="Failed to parse LLM output. Retrying...",
color="yellow",
)
if self.verbose:
self._printer.print(
content="Failed to parse LLM output. Retrying...",
color="yellow",
)
formatted_answer = handle_output_parser_exception(
e=e,
messages=self._messages,
iterations=self._iterations,
log_error_after=3,
printer=self._printer,
verbose=self.verbose,
)

except Exception as e:
@@ -670,9 +676,10 @@ class LiteAgent(FlowTrackable, BaseModel):
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
i18n=self.i18n,
verbose=self.verbose,
)
continue
handle_unknown_error(self._printer, e)
handle_unknown_error(self._printer, e, verbose=self.verbose)
raise e

finally:

@@ -497,7 +497,7 @@ class BaseLLM(ABC):
from_agent=from_agent,
)

return result
return str(result) if not isinstance(result, str) else result

except Exception as e:
error_msg = f"Error executing function '{function_name}': {e!s}"
@@ -620,11 +620,13 @@ class BaseLLM(ABC):
try:
# Try to parse as JSON first
if response.strip().startswith("{") or response.strip().startswith("["):
return response_format.model_validate_json(response)
data = json.loads(response)
return response_format.model_validate(data)

json_match = _JSON_EXTRACTION_PATTERN.search(response)
if json_match:
return response_format.model_validate_json(json_match.group())
data = json.loads(json_match.group())
return response_format.model_validate(data)

raise ValueError("No JSON found in response")

@@ -735,22 +737,25 @@ class BaseLLM(ABC):
task=None,
crew=None,
)
verbose = getattr(from_agent, "verbose", True) if from_agent else True
printer = Printer()

try:
for hook in before_hooks:
result = hook(hook_context)
if result is False:
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
if verbose:
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
if verbose:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)

return True

@@ -803,6 +808,7 @@ class BaseLLM(ABC):
crew=None,
response=response,
)
verbose = getattr(from_agent, "verbose", True) if from_agent else True
printer = Printer()
modified_response = response

@@ -813,9 +819,10 @@ class BaseLLM(ABC):
modified_response = result
hook_context.response = modified_response
except Exception as e:
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)
if verbose:
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)

return modified_response

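The structured-output path now decodes with `json.loads` and hands the resulting object to `model_validate`, so a malformed payload surfaces as `json.JSONDecodeError` before Pydantic validation, letting the regex fallback run. A reduced sketch of the pattern (the model and the brace-matching regex are illustrative, not the library's own):

import json
import re

from pydantic import BaseModel

_JSON_BLOB = re.compile(r"\{.*\}", re.DOTALL)  # crude brace matcher for the sketch


class Answer(BaseModel):
    verdict: str


def parse_structured(response: str) -> Answer:
    text = response.strip()
    if text.startswith(("{", "[")):
        # Decode first, then validate the resulting Python object.
        return Answer.model_validate(json.loads(text))
    match = _JSON_BLOB.search(response)
    if match:
        return Answer.model_validate(json.loads(match.group()))
    raise ValueError("No JSON found in response")


print(parse_structured('Sure! {"verdict": "pass"}').verdict)  # pass
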
@@ -1,6 +1,6 @@
from __future__ import annotations

from collections.abc import Sequence
from collections.abc import Mapping, Sequence
from contextlib import AsyncExitStack
import json
import logging
@@ -538,7 +538,7 @@ class BedrockCompletion(BaseLLM):
self,
messages: list[LLMMessage],
body: BedrockConverseRequestBody,
available_functions: dict[str, Any] | None = None,
available_functions: Mapping[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,
@@ -1009,7 +1009,7 @@ class BedrockCompletion(BaseLLM):
self,
messages: list[LLMMessage],
body: BedrockConverseRequestBody,
available_functions: dict[str, Any] | None = None,
available_functions: Mapping[str, Any] | None = None,
from_task: Any | None = None,
from_agent: Any | None = None,
response_model: type[BaseModel] | None = None,

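`Mapping` plays the same role for dict-like parameters that `Sequence` plays for list-like ones: it promises read-only access, so callers may pass a plain `dict`, a `types.MappingProxyType`, or any other mapping. A small illustration (the `dispatch` function is hypothetical):

from collections.abc import Mapping
from types import MappingProxyType
from typing import Any


def dispatch(name: str, available_functions: Mapping[str, Any] | None = None) -> Any:
    # Read-only lookup; the mapping is never mutated here.
    if not available_functions or name not in available_functions:
        return None
    return available_functions[name]()


funcs = {"ping": lambda: "pong"}
dispatch("ping", funcs)                    # plain dict
dispatch("ping", MappingProxyType(funcs))  # read-only proxy also type-checks
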
@@ -466,9 +466,9 @@ class GeminiCompletion(BaseLLM):
if response_model:
config_params["response_mime_type"] = "application/json"
schema_output = generate_model_description(response_model)
config_params["response_schema"] = schema_output.get("json_schema", {}).get(
"schema", {}
)
config_params["response_json_schema"] = schema_output.get(
"json_schema", {}
).get("schema", {})

# Handle tools for supported models
if tools and self.supports_tools:

@@ -12,15 +12,17 @@ from crewai.utilities.paths import db_storage_path
class LTMSQLiteStorage:
"""SQLite storage class for long-term memory data."""

def __init__(self, db_path: str | None = None) -> None:
def __init__(self, db_path: str | None = None, verbose: bool = True) -> None:
"""Initialize the SQLite storage.

Args:
db_path: Optional path to the database file.
verbose: Whether to print error messages.
"""
if db_path is None:
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
self.db_path = db_path
self._verbose = verbose
self._printer: Printer = Printer()
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
self._initialize_db()
@@ -44,10 +46,11 @@ class LTMSQLiteStorage:

conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
color="red",
)

def save(
self,
@@ -69,10 +72,11 @@ class LTMSQLiteStorage:
)
conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)

def load(self, task_description: str, latest_n: int) -> list[dict[str, Any]] | None:
"""Queries the LTM table by task description with error handling."""
@@ -101,10 +105,11 @@ class LTMSQLiteStorage:
]

except sqlite3.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
return None

def reset(self) -> None:
@@ -116,10 +121,11 @@ class LTMSQLiteStorage:
conn.commit()

except sqlite3.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)

async def asave(
self,
@@ -147,10 +153,11 @@ class LTMSQLiteStorage:
)
await conn.commit()
except aiosqlite.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
color="red",
)

async def aload(
self, task_description: str, latest_n: int
@@ -187,10 +194,11 @@ class LTMSQLiteStorage:
for row in rows
]
except aiosqlite.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
color="red",
)
return None

async def areset(self) -> None:
@@ -200,7 +208,8 @@ class LTMSQLiteStorage:
await conn.execute("DELETE FROM long_term_memories")
await conn.commit()
except aiosqlite.Error as e:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)
if self._verbose:
self._printer.print(
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
color="red",
)

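With the flag in place, a storage instance can be constructed silent from the start; sqlite errors are still caught, they are simply not printed. A usage sketch, assuming the class's existing `save(task_description, metadata, datetime, score)` signature (the path is illustrative):

from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage

# Quiet storage: database errors are handled but no longer echoed to the console.
storage = LTMSQLiteStorage(db_path="/tmp/ltm_example.db", verbose=False)
storage.save(
    task_description="summarize the quarterly report",
    metadata={"quality": 8},
    datetime="2026-01-28T00:00:00",
    score=8,
)
print(storage.load("summarize the quarterly report", latest_n=1))
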
@@ -1,5 +1,6 @@
"""Type definitions specific to ChromaDB implementation."""

from collections.abc import Mapping
from typing import Any, NamedTuple

from chromadb.api import AsyncClientAPI, ClientAPI
@@ -48,7 +49,7 @@ class PreparedDocuments(NamedTuple):

ids: list[str]
texts: list[str]
metadatas: list[dict[str, str | int | float | bool]]
metadatas: list[Mapping[str, str | int | float | bool]]


class ExtractedSearchParams(NamedTuple):

@@ -1,5 +1,6 @@
"""Utility functions for ChromaDB client implementation."""

from collections.abc import Mapping
import hashlib
import json
from typing import Literal, TypeGuard, cast
@@ -65,7 +66,7 @@ def _prepare_documents_for_chromadb(
"""
ids: list[str] = []
texts: list[str] = []
metadatas: list[dict[str, str | int | float | bool]] = []
metadatas: list[Mapping[str, str | int | float | bool]] = []
seen_ids: dict[str, int] = {}

try:
@@ -110,7 +111,7 @@ def _prepare_documents_for_chromadb(

def _create_batch_slice(
prepared: PreparedDocuments, start_index: int, batch_size: int
) -> tuple[list[str], list[str], list[dict[str, str | int | float | bool]] | None]:
) -> tuple[list[str], list[str], list[Mapping[str, str | int | float | bool]] | None]:
"""Create a batch slice from prepared documents.

Args:

@@ -1,6 +1,6 @@
"""IBM WatsonX embedding function implementation."""

from typing import cast
from typing import Any, cast

from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from typing_extensions import Unpack
@@ -15,14 +15,18 @@ _printer = Printer()
class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
"""Embedding function for IBM WatsonX models."""

def __init__(self, **kwargs: Unpack[WatsonXProviderConfig]) -> None:
def __init__(
self, *, verbose: bool = True, **kwargs: Unpack[WatsonXProviderConfig]
) -> None:
"""Initialize WatsonX embedding function.

Args:
verbose: Whether to print error messages.
**kwargs: Configuration parameters for WatsonX Embeddings and Credentials.
"""
super().__init__(**kwargs)
self._config = kwargs
self._verbose = verbose

@staticmethod
def name() -> str:
@@ -56,7 +60,7 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
if isinstance(input, str):
input = [input]

embeddings_config: dict = {
embeddings_config: dict[str, Any] = {
"model_id": self._config["model_id"],
}
if "params" in self._config and self._config["params"] is not None:
@@ -90,7 +94,7 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
if "credentials" in self._config and self._config["credentials"] is not None:
embeddings_config["credentials"] = self._config["credentials"]
else:
cred_config: dict = {}
cred_config: dict[str, Any] = {}
if "url" in self._config and self._config["url"] is not None:
cred_config["url"] = self._config["url"]
if "api_key" in self._config and self._config["api_key"] is not None:
@@ -159,5 +163,6 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
embeddings = embedding.embed_documents(input)
return cast(Embeddings, embeddings)
except Exception as e:
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
if self._verbose:
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
raise

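Note that `verbose` is declared keyword-only ahead of the `**kwargs` provider config, so it can never be swallowed by, or collide with, a provider option of the same name. The pattern in isolation (class and values hypothetical):

from typing import Any


class ExampleEmbedder:
    # verbose sits before **kwargs and is keyword-only, so it is consumed
    # here rather than being collected into the provider config.
    def __init__(self, *, verbose: bool = True, **kwargs: Any) -> None:
        self._verbose = verbose
        self._config = kwargs


quiet = ExampleEmbedder(verbose=False, model_id="ibm/granite-embedding")
assert quiet._config == {"model_id": "ibm/granite-embedding"}
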
@@ -1,8 +1,6 @@
"""Type definitions for the embeddings module."""

from typing import Annotated, Any, Literal, TypeAlias

from pydantic import Field
from typing import Any, Literal, TypeAlias

from crewai.rag.core.base_embeddings_provider import BaseEmbeddingsProvider
from crewai.rag.embeddings.providers.aws.types import BedrockProviderSpec
@@ -31,7 +29,7 @@ from crewai.rag.embeddings.providers.text2vec.types import Text2VecProviderSpec
from crewai.rag.embeddings.providers.voyageai.types import VoyageAIProviderSpec


ProviderSpec: TypeAlias = Annotated[
ProviderSpec: TypeAlias = (
AzureProviderSpec
| BedrockProviderSpec
| CohereProviderSpec
@@ -49,9 +47,8 @@ ProviderSpec: TypeAlias = Annotated[
| Text2VecProviderSpec
| VertexAIProviderSpec
| VoyageAIProviderSpec
| WatsonXProviderSpec,
Field(discriminator="provider"),
]
| WatsonXProviderSpec
)

AllowedEmbeddingProviders = Literal[
"azure",

@@ -1,6 +1,6 @@
"""Type definitions for RAG (Retrieval-Augmented Generation) systems."""

from collections.abc import Callable
from collections.abc import Callable, Mapping
from typing import Any, TypeAlias

from typing_extensions import Required, TypedDict
@@ -19,8 +19,8 @@ class BaseRecord(TypedDict, total=False):
doc_id: str
content: Required[str]
metadata: (
dict[str, str | int | float | bool]
| list[dict[str, str | int | float | bool]]
Mapping[str, str | int | float | bool]
| list[Mapping[str, str | int | float | bool]]
)

@@ -767,10 +767,11 @@ class Task(BaseModel):
if files:
supported_types: list[str] = []
if self.agent.llm and self.agent.llm.supports_multimodal():
provider = getattr(self.agent.llm, "provider", None) or getattr(
self.agent.llm, "model", "openai"
)
provider: str = str(
getattr(self.agent.llm, "provider", None)
or getattr(self.agent.llm, "model", "openai")
)
api = getattr(self.agent.llm, "api", None)
api: str | None = getattr(self.agent.llm, "api", None)
supported_types = get_supported_content_types(provider, api)

def is_auto_injected(content_type: str) -> bool:
@@ -887,10 +888,11 @@ Follow these guidelines:
try:
crew_chat_messages = json.loads(crew_chat_messages_json)
except json.JSONDecodeError as e:
_printer.print(
f"An error occurred while parsing crew chat messages: {e}",
color="red",
)
if self.agent and self.agent.verbose:
_printer.print(
f"An error occurred while parsing crew chat messages: {e}",
color="red",
)
raise

conversation_history = "\n".join(
@@ -1132,11 +1134,12 @@ Follow these guidelines:
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
)
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)
if agent and agent.verbose:
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)

# Regenerate output from agent
result = agent.execute_task(
@@ -1229,11 +1232,12 @@ Follow these guidelines:
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
)
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)
if agent and agent.verbose:
printer = Printer()
printer.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)

result = await agent.aexecute_task(
task=self,

@@ -200,12 +200,9 @@ class CrewStructuredTool:
"""
if isinstance(raw_args, str):
try:
validated_args = self.args_schema.model_validate_json(raw_args)
return validated_args.model_dump()
raw_args = json.loads(raw_args)
except json.JSONDecodeError as e:
raise ValueError(f"Failed to parse arguments as JSON: {e}") from e
except Exception as e:
raise ValueError(f"Arguments validation failed: {e}") from e

try:
validated_args = self.args_schema.model_validate(raw_args)

@@ -384,6 +384,8 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -396,6 +398,8 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -610,6 +614,8 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -622,6 +628,8 @@ class ToolUsage:
if (
hasattr(available_tool, "max_usage_count")
and available_tool.max_usage_count is not None
and self.agent
and self.agent.verbose
):
self._printer.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
@@ -884,15 +892,17 @@ class ToolUsage:
# Attempt 4: Repair JSON
try:
repaired_input = str(repair_json(tool_input, skip_json_loads=True))
self._printer.print(
content=f"Repaired JSON: {repaired_input}", color="blue"
)
if self.agent and self.agent.verbose:
self._printer.print(
content=f"Repaired JSON: {repaired_input}", color="blue"
)
arguments = json.loads(repaired_input)
if isinstance(arguments, dict):
return arguments
except Exception as e:
error = f"Failed to repair JSON: {e}"
self._printer.print(content=error, color="red")
if self.agent and self.agent.verbose:
self._printer.print(content=error, color="red")

error_message = (
"Tool input must be a valid dictionary in JSON or Python literal format"

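Context for the repair step whose logging is gated above: `repair_json` from the `json-repair` package takes near-JSON text, for example single quotes or a trailing comma from an LLM, and returns a best-effort valid JSON string; `skip_json_loads=True` skips its internal fast-path parse. A small sketch:

import json

from json_repair import repair_json

# Near-JSON as an LLM might produce it: single quotes and a trailing comma.
raw = "{'query': 'crewai', 'limit': 5,}"
repaired = str(repair_json(raw, skip_json_loads=True))
arguments = json.loads(repaired)
print(arguments)  # {'query': 'crewai', 'limit': 5}
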
@@ -1,7 +1,7 @@
from __future__ import annotations

import asyncio
from collections.abc import Callable
from collections.abc import Callable, Sequence
import json
import re
from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
@@ -98,7 +98,7 @@ def parse_tools(tools: list[BaseTool]) -> list[CrewStructuredTool]:
return tools_list


def get_tool_names(tools: list[CrewStructuredTool | BaseTool]) -> str:
def get_tool_names(tools: Sequence[CrewStructuredTool | BaseTool]) -> str:
"""Get the sanitized names of the tools.

Args:
@@ -111,7 +111,7 @@ def get_tool_names(tools: list[CrewStructuredTool | BaseTool]) -> str:


def render_text_description_and_args(
tools: list[CrewStructuredTool | BaseTool],
tools: Sequence[CrewStructuredTool | BaseTool],
) -> str:
"""Render the tool name, description, and args in plain text.

@@ -130,7 +130,7 @@ def render_text_description_and_args(


def convert_tools_to_openai_schema(
tools: list[BaseTool | CrewStructuredTool],
tools: Sequence[BaseTool | CrewStructuredTool],
) -> tuple[list[dict[str, Any]], dict[str, Callable[..., Any]]]:
"""Convert CrewAI tools to OpenAI function calling format.

@@ -210,6 +210,7 @@ def handle_max_iterations_exceeded(
messages: list[LLMMessage],
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
verbose: bool = True,
) -> AgentFinish:
"""Handles the case when the maximum number of iterations is exceeded. Performs one more LLM call to get the final answer.

@@ -220,14 +221,16 @@ def handle_max_iterations_exceeded(
messages: List of messages to send to the LLM.
llm: The LLM instance to call.
callbacks: List of callbacks for the LLM call.
verbose: Whether to print output.

Returns:
AgentFinish with the final answer after exceeding max iterations.
"""
printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)
if verbose:
printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)

if formatted_answer and hasattr(formatted_answer, "text"):
assistant_message = (
@@ -245,10 +248,11 @@ def handle_max_iterations_exceeded(
)

if answer is None or answer == "":
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
if verbose:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")

formatted = format_answer(answer=answer)
@@ -322,6 +326,7 @@ def get_llm_response(
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None,
verbose: bool = True,
) -> str | Any:
"""Call the LLM and return the response, handling any invalid responses.

@@ -347,7 +352,7 @@ def get_llm_response(
"""

if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
if not _setup_before_llm_call_hooks(executor_context, printer, verbose=verbose):
raise ValueError("LLM call blocked by before_llm_call hook")
messages = executor_context.messages

@@ -364,13 +369,16 @@ def get_llm_response(
except Exception as e:
raise e
if not answer:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
if verbose:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")

return _setup_after_llm_call_hooks(executor_context, answer, printer)
return _setup_after_llm_call_hooks(
executor_context, answer, printer, verbose=verbose
)


async def aget_llm_response(
@@ -384,6 +392,7 @@ async def aget_llm_response(
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | AgentExecutor | None = None,
verbose: bool = True,
) -> str | Any:
"""Call the LLM asynchronously and return the response.

@@ -408,7 +417,7 @@ async def aget_llm_response(
ValueError: If the response is None or empty.
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
if not _setup_before_llm_call_hooks(executor_context, printer, verbose=verbose):
raise ValueError("LLM call blocked by before_llm_call hook")
messages = executor_context.messages

@@ -425,13 +434,16 @@ async def aget_llm_response(
except Exception as e:
raise e
if not answer:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
if verbose:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")

return _setup_after_llm_call_hooks(executor_context, answer, printer)
return _setup_after_llm_call_hooks(
executor_context, answer, printer, verbose=verbose
)


def process_llm_response(
@@ -498,13 +510,19 @@ def handle_agent_action_core(
return formatted_answer


def handle_unknown_error(printer: Printer, exception: Exception) -> None:
def handle_unknown_error(
printer: Printer, exception: Exception, verbose: bool = True
) -> None:
"""Handle unknown errors by informing the user.

Args:
printer: Printer instance for output
exception: The exception that occurred
verbose: Whether to print output.
"""
if not verbose:
return

error_message = str(exception)

if "litellm" in error_message:
@@ -526,6 +544,7 @@ def handle_output_parser_exception(
iterations: int,
log_error_after: int = 3,
printer: Printer | None = None,
verbose: bool = True,
) -> AgentAction:
"""Handle OutputParserError by updating messages and formatted_answer.

@@ -548,7 +567,7 @@ def handle_output_parser_exception(
thought="",
)

if iterations > log_error_after and printer:
if verbose and iterations > log_error_after and printer:
printer.print(
content=f"Error parsing LLM output, agent will retry: {e.error}",
color="red",
@@ -578,6 +597,7 @@ def handle_context_length(
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
i18n: I18N,
verbose: bool = True,
) -> None:
"""Handle context length exceeded by either summarizing or raising an error.

@@ -593,16 +613,20 @@ def handle_context_length(
SystemExit: If context length is exceeded and user opts not to summarize
"""
if respect_context_window:
printer.print(
content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
color="yellow",
)
summarize_messages(messages=messages, llm=llm, callbacks=callbacks, i18n=i18n)
if verbose:
printer.print(
content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
color="yellow",
)
summarize_messages(
messages=messages, llm=llm, callbacks=callbacks, i18n=i18n, verbose=verbose
)
else:
printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
if verbose:
printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
@@ -613,6 +637,7 @@ def summarize_messages(
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
i18n: I18N,
verbose: bool = True,
) -> None:
"""Summarize messages to fit within context window.

@@ -644,10 +669,11 @@ def summarize_messages(

total_groups = len(messages_groups)
for idx, group in enumerate(messages_groups, 1):
Printer().print(
content=f"Summarizing {idx}/{total_groups}...",
color="yellow",
)
if verbose:
Printer().print(
content=f"Summarizing {idx}/{total_groups}...",
color="yellow",
)

summarization_messages = [
format_message_for_llm(
@@ -905,12 +931,14 @@ def extract_tool_call_info(
def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
printer: Printer,
verbose: bool = True,
) -> bool:
"""Setup and invoke before_llm_call hooks for the executor context.

Args:
executor_context: The executor context to setup the hooks for.
printer: Printer instance for error logging.
verbose: Whether to print output.

Returns:
True if LLM execution should proceed, False if blocked by a hook.
@@ -925,26 +953,29 @@ def _setup_before_llm_call_hooks(
for hook in executor_context.before_llm_call_hooks:
result = hook(hook_context)
if result is False:
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
if verbose:
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
if verbose:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)

if not isinstance(executor_context.messages, list):
printer.print(
content=(
"Warning: before_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if verbose:
printer.print(
content=(
"Warning: before_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if isinstance(original_messages, list):
executor_context.messages = original_messages
else:
@@ -957,6 +988,7 @@ def _setup_after_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
answer: str,
printer: Printer,
verbose: bool = True,
) -> str:
"""Setup and invoke after_llm_call hooks for the executor context.

@@ -964,6 +996,7 @@ def _setup_after_llm_call_hooks(
executor_context: The executor context to setup the hooks for.
answer: The LLM response string.
printer: Printer instance for error logging.
verbose: Whether to print output.

Returns:
The potentially modified response string.
@@ -981,20 +1014,22 @@ def _setup_after_llm_call_hooks(
answer = modified_response

except Exception as e:
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)
if verbose:
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)

if not isinstance(executor_context.messages, list):
printer.print(
content=(
"Warning: after_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if verbose:
printer.print(
content=(
"Warning: after_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if isinstance(original_messages, list):
executor_context.messages = original_messages
else:

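Across all of these helpers the change follows one convention: console output is gated on a `verbose` flag that defaults to `True` for backward compatibility, while error handling, raising, and return values are untouched. Distilled to its core (the names below are illustrative, not the library's):

class Printer:
    """Stand-in for crewai.utilities.printer.Printer."""

    def print(self, content: str, color: str = "") -> None:
        print(content)


def handle_error(printer: Printer, exception: Exception, verbose: bool = True) -> None:
    # Printing is optional; the caller still decides whether to re-raise.
    if not verbose:
        return
    printer.print(content=f"An unknown error occurred: {exception}", color="red")


try:
    raise RuntimeError("boom")
except RuntimeError as exc:
    handle_error(Printer(), exc, verbose=False)  # silent, behavior otherwise unchanged
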
@@ -205,10 +205,11 @@ def convert_to_model(
)

except Exception as e:
Printer().print(
content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
color="red",
)
if agent and getattr(agent, "verbose", True):
Printer().print(
content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
color="red",
)
return result


@@ -262,10 +263,11 @@ def handle_partial_json(
except ValidationError:
raise
except Exception as e:
Printer().print(
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
color="red",
)
if agent and getattr(agent, "verbose", True):
Printer().print(
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
color="red",
)

return convert_with_instructions(
result=result,
@@ -323,10 +325,11 @@ def convert_with_instructions(
)

if isinstance(exported_result, ConverterError):
Printer().print(
content=f"Failed to convert result to model: {exported_result}",
color="red",
)
if agent and getattr(agent, "verbose", True):
Printer().print(
content=f"Failed to convert result to model: {exported_result}",
color="red",
)
return result

return exported_result

@@ -1004,3 +1004,53 @@ def test_prepare_kickoff_param_files_override_message_files():

assert "files" in inputs
assert inputs["files"]["same.png"] is param_file  # param takes precedence


def test_lite_agent_verbose_false_suppresses_printer_output():
"""Test that setting verbose=False suppresses all printer output."""
from crewai.agents.parser import AgentFinish
from crewai.types.usage_metrics import UsageMetrics

mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Final Answer: Hello!"
mock_llm.stop = []
mock_llm.supports_stop_words.return_value = False
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
total_tokens=100,
prompt_tokens=50,
completion_tokens=50,
cached_prompt_tokens=0,
successful_requests=1,
)

with pytest.warns(DeprecationWarning):
agent = LiteAgent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
verbose=False,
)
result = agent.kickoff("Say hello")

assert result is not None
assert isinstance(result, LiteAgentOutput)

# Verify the printer is never called: patch it before execution
with pytest.warns(DeprecationWarning):
agent2 = LiteAgent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
verbose=False,
)

mock_printer = Mock()
agent2._printer = mock_printer

agent2.kickoff("Say hello")

mock_printer.print.assert_not_called()

@@ -2585,6 +2585,7 @@ def test_warning_long_term_memory_without_entity_memory():
goal="You research about math.",
backstory="You're an expert in research and you love to learn new things.",
allow_delegation=False,
verbose=True,
)

task1 = Task(

@@ -1,3 +1,3 @@
"""CrewAI development tools."""

__version__ = "1.9.0"
__version__ = "1.9.1"