Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2026-01-29 01:58:14 +00:00

Compare commits: main...lorenze/im (3 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 92505685e1 |  |
|  | ae37e88f53 |  |
|  | 02f6926aa0 |  |
@@ -152,4 +152,4 @@ __all__ = [
"wrap_file_source",
]

__version__ = "1.9.2"
__version__ = "1.9.0"

@@ -12,7 +12,7 @@ dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"docker~=7.1.0",
"crewai==1.9.2",
"crewai==1.9.0",
"lancedb~=0.5.4",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",

@@ -291,4 +291,4 @@ __all__ = [
"ZapierActionTools",
]

__version__ = "1.9.2"
__version__ = "1.9.0"

@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = [
"crewai-tools==1.9.2",
"crewai-tools==1.9.0",
]
embeddings = [
"tiktoken~=0.8.0"

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

_suppress_pydantic_deprecation_warnings()

__version__ = "1.9.2"
__version__ = "1.9.0"
_telemetry_submitted = False

@@ -37,8 +37,7 @@ class CrewAgentExecutorMixin:
self.crew
and self.agent
and self.task
and f"Action: {sanitize_tool_name('Delegate work to coworker')}"
not in output.text
and f"Action: {sanitize_tool_name('Delegate work to coworker')}" not in output.text
):
try:
if (

@@ -133,11 +132,10 @@ class CrewAgentExecutorMixin:
and self.crew._long_term_memory
and self.crew._entity_memory is None
):
if self.agent and self.agent.verbose:
self._printer.print(
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
color="bold_yellow",
)
self._printer.print(
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
color="bold_yellow",
)

def _ask_human_input(self, final_answer: str) -> str:
"""Prompt human input with mode-appropriate messaging.
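The hunks above repeatedly wrap `self._printer.print(...)` calls in an `if self.agent.verbose:` guard. A minimal sketch of that gating pattern, using a hypothetical `Printer` stand-in rather than crewAI's internal printer utility:

```python
class Printer:
    """Stand-in for a console printer; crewAI uses an internal equivalent."""

    def print(self, content: str, color: str = "white") -> None:
        print(f"[{color}] {content}")


def warn_if_verbose(printer: Printer, verbose: bool, message: str) -> None:
    # Only surface advisory output when the agent runs in verbose mode,
    # mirroring the `if self.agent.verbose:` guard added around print calls.
    if verbose:
        printer.print(content=message, color="bold_yellow")


warn_if_verbose(
    Printer(),
    verbose=True,
    message="Long term memory is enabled, but entity memory is not enabled.",
)
```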
@@ -206,14 +206,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
try:
formatted_answer = self._invoke_loop()
except AssertionError:
if self.agent.verbose:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
raise
except Exception as e:
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise

if self.ask_for_human_input:

@@ -328,7 +327,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
verbose=self.agent.verbose,
)
break

@@ -343,41 +341,22 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
verbose=self.agent.verbose,
)
# breakpoint()
if self.response_model is not None:
try:
if isinstance(answer, BaseModel):
output_json = answer.model_dump_json()
formatted_answer = AgentFinish(
thought="",
output=answer,
text=output_json,
)
else:
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
# If validation fails, convert BaseModel to JSON string for parsing
answer_str = (
answer.model_dump_json()
if isinstance(answer, BaseModel)
else str(answer)
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
formatted_answer = process_llm_response(
answer_str, self.use_stop_words
answer, self.use_stop_words
) # type: ignore[assignment]
else:
# When no response_model, answer should be a string
answer_str = str(answer) if not isinstance(answer, str) else answer
formatted_answer = process_llm_response(
answer_str, self.use_stop_words
) # type: ignore[assignment]
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]

if isinstance(formatted_answer, AgentAction):
# Extract agent fingerprint if available
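The hunk above validates the raw LLM answer against an optional `response_model` and falls back to plain text parsing when validation fails. A minimal, self-contained sketch of that decision flow, assuming a simple Pydantic model and a stand-in `process_llm_response` (crewAI's real parser does more):

```python
from pydantic import BaseModel, ValidationError


class Report(BaseModel):
    title: str
    score: int


def process_llm_response(text: str) -> str:
    # Stand-in for crewAI's ReAct-style parser; here we just pass the text through.
    return text


def format_answer(answer: str | BaseModel, response_model: type[BaseModel] | None) -> str:
    """Validate against response_model when provided, else treat as plain text."""
    if response_model is None:
        return process_llm_response(str(answer))
    try:
        if isinstance(answer, BaseModel):
            return answer.model_dump_json()  # already structured
        response_model.model_validate_json(answer)  # raises if the JSON doesn't match
        return answer
    except ValidationError:
        # Fall back to ordinary text handling when validation fails.
        return process_llm_response(str(answer))


print(format_answer('{"title": "Q3", "score": 9}', Report))
print(format_answer("not json at all", Report))
```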
@@ -420,7 +399,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
iterations=self.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
verbose=self.agent.verbose,
)

except Exception as e:

@@ -435,10 +413,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1

@@ -484,7 +461,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
verbose=self.agent.verbose,
)
self._show_logs(formatted_answer)
return formatted_answer

@@ -506,7 +482,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
verbose=self.agent.verbose,
)

# Check if the response is a list of tool calls

@@ -538,18 +513,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._show_logs(formatted_answer)
return formatted_answer

if isinstance(answer, BaseModel):
output_json = answer.model_dump_json()
formatted_answer = AgentFinish(
thought="",
output=answer,
text=output_json,
)
self._invoke_step_callback(formatted_answer)
self._append_message(output_json)
self._show_logs(formatted_answer)
return formatted_answer

# Unexpected response type, treat as final answer
formatted_answer = AgentFinish(
thought="",

@@ -572,10 +535,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1

@@ -597,23 +559,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
verbose=self.agent.verbose,
)

if isinstance(answer, BaseModel):
output_json = answer.model_dump_json()
formatted_answer = AgentFinish(
thought="",
output=answer,
text=output_json,
)
else:
answer_str = answer if isinstance(answer, str) else str(answer)
formatted_answer = AgentFinish(
thought="",
output=answer_str,
text=answer_str,
)
formatted_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._show_logs(formatted_answer)
return formatted_answer

@@ -803,10 +755,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
track_delegation_if_needed(func_name, args_dict, self.task)

# Find the structured tool for hook context
structured_tool: CrewStructuredTool | None = None
for structured in self.tools or []:
if sanitize_tool_name(structured.name) == func_name:
structured_tool = structured
structured_tool = None
for tool in self.tools or []:
if sanitize_tool_name(tool.name) == func_name:
structured_tool = tool
break

# Execute before_tool_call hooks

@@ -827,11 +779,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
hook_blocked = True
break
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)
self._printer.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)

# If hook blocked execution, set result and skip tool execution
if hook_blocked:

@@ -897,16 +848,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
after_hooks = get_after_tool_call_hooks()
try:
for after_hook in after_hooks:
after_hook_result = after_hook(after_hook_context)
if after_hook_result is not None:
result = after_hook_result
hook_result = after_hook(after_hook_context)
if hook_result is not None:
result = hook_result
after_hook_context.tool_result = result
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)
self._printer.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)

# Emit tool usage finished event
crewai_event_bus.emit(
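The after-hook hunk above lets any registered `after_tool_call` hook replace the tool result by returning a non-None value, while hook errors are reported rather than raised. A minimal sketch of that override loop, using a local hook list instead of crewAI's `get_after_tool_call_hooks()` registry:

```python
from collections.abc import Callable
from dataclasses import dataclass


@dataclass
class ToolCallContext:
    """Minimal stand-in for the hook context built around a tool call."""
    tool_name: str
    tool_result: str


AfterHook = Callable[[ToolCallContext], str | None]


def apply_after_hooks(result: str, context: ToolCallContext, hooks: list[AfterHook]) -> str:
    try:
        for after_hook in hooks:
            hook_result = after_hook(context)
            if hook_result is not None:
                result = hook_result          # a non-None return overrides the tool output
                context.tool_result = result  # keep later hooks in sync
    except Exception as hook_error:
        # Hook failures are reported, not raised, so tool execution still completes.
        print(f"Error in after_tool_call hook: {hook_error}")
    return result


def redact(ctx: ToolCallContext) -> str | None:
    return ctx.tool_result.replace("secret", "[redacted]")


ctx = ToolCallContext(tool_name="search", tool_result="the secret value is 42")
print(apply_after_hooks(ctx.tool_result, ctx, [redact]))
```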
@@ -992,14 +942,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
try:
formatted_answer = await self._ainvoke_loop()
except AssertionError:
if self.agent.verbose:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
raise
except Exception as e:
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise

if self.ask_for_human_input:

@@ -1050,7 +999,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
verbose=self.agent.verbose,
)
break

@@ -1065,41 +1013,22 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
verbose=self.agent.verbose,
)

if self.response_model is not None:
try:
if isinstance(answer, BaseModel):
output_json = answer.model_dump_json()
formatted_answer = AgentFinish(
thought="",
output=answer,
text=output_json,
)
else:
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
# If validation fails, convert BaseModel to JSON string for parsing
answer_str = (
answer.model_dump_json()
if isinstance(answer, BaseModel)
else str(answer)
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
formatted_answer = process_llm_response(
answer_str, self.use_stop_words
answer, self.use_stop_words
) # type: ignore[assignment]
else:
# When no response_model, answer should be a string
answer_str = str(answer) if not isinstance(answer, str) else answer
formatted_answer = process_llm_response(
answer_str, self.use_stop_words
) # type: ignore[assignment]
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]

if isinstance(formatted_answer, AgentAction):
fingerprint_context = {}

@@ -1141,7 +1070,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
iterations=self.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
verbose=self.agent.verbose,
)

except Exception as e:

@@ -1155,10 +1083,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1

@@ -1198,7 +1125,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
verbose=self.agent.verbose,
)
self._show_logs(formatted_answer)
return formatted_answer

@@ -1220,7 +1146,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
verbose=self.agent.verbose,
)
# Check if the response is a list of tool calls
if (

@@ -1251,18 +1176,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._show_logs(formatted_answer)
return formatted_answer

if isinstance(answer, BaseModel):
output_json = answer.model_dump_json()
formatted_answer = AgentFinish(
thought="",
output=answer,
text=output_json,
)
self._invoke_step_callback(formatted_answer)
self._append_message(output_json)
self._show_logs(formatted_answer)
return formatted_answer

# Unexpected response type, treat as final answer
formatted_answer = AgentFinish(
thought="",

@@ -1285,10 +1198,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise e
finally:
self.iterations += 1

@@ -1310,23 +1222,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
verbose=self.agent.verbose,
)

if isinstance(answer, BaseModel):
output_json = answer.model_dump_json()
formatted_answer = AgentFinish(
thought="",
output=answer,
text=output_json,
)
else:
answer_str = answer if isinstance(answer, str) else str(answer)
formatted_answer = AgentFinish(
thought="",
output=answer_str,
text=answer_str,
)
formatted_answer = AgentFinish(
thought="",
output=str(answer),
text=str(answer),
)
self._show_logs(formatted_answer)
return formatted_answer

@@ -1437,11 +1339,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
)

if train_iteration is None or not isinstance(train_iteration, int):
if self.agent.verbose:
self._printer.print(
content="Invalid or missing train iteration. Cannot save training data.",
color="red",
)
self._printer.print(
content="Invalid or missing train iteration. Cannot save training data.",
color="red",
)
return

training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)

@@ -1461,14 +1362,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
if train_iteration in agent_training_data:
agent_training_data[train_iteration]["improved_output"] = result.output
else:
if self.agent.verbose:
self._printer.print(
content=(
f"No existing training data for agent {agent_id} and iteration "
f"{train_iteration}. Cannot save improved output."
),
color="red",
)
self._printer.print(
content=(
f"No existing training data for agent {agent_id} and iteration "
f"{train_iteration}. Cannot save improved output."
),
color="red",
)
return

# Update the training data and save

@@ -1499,12 +1399,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
Returns:
Final answer after feedback.
"""
output_str = (
formatted_answer.output
if isinstance(formatted_answer.output, str)
else formatted_answer.output.model_dump_json()
)
human_feedback = self._ask_human_input(output_str)
human_feedback = self._ask_human_input(formatted_answer.output)

if self._is_training_mode():
return self._handle_training_feedback(formatted_answer, human_feedback)

@@ -1563,12 +1458,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.ask_for_human_input = False
else:
answer = self._process_feedback_iteration(feedback)
output_str = (
answer.output
if isinstance(answer.output, str)
else answer.output.model_dump_json()
)
feedback = self._ask_human_input(output_str)
feedback = self._ask_human_input(answer.output)

return answer

@@ -8,7 +8,6 @@ AgentAction or AgentFinish objects.
from dataclasses import dataclass

from json_repair import repair_json  # type: ignore[import-untyped]
from pydantic import BaseModel

from crewai.agents.constants import (
ACTION_INPUT_ONLY_REGEX,

@@ -41,7 +40,7 @@ class AgentFinish:
"""Represents the final answer from an agent."""

thought: str
output: str | BaseModel
output: str
text: str
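One side of the `AgentFinish` hunk above types `output` as `str | BaseModel`, and the human-feedback hunks earlier in this diff convert that output to a string (via `model_dump_json()`) before prompting the user. A minimal sketch of that conversion, using a local stand-in for the dataclass:

```python
from dataclasses import dataclass

from pydantic import BaseModel


@dataclass
class AgentFinish:
    thought: str
    output: str | BaseModel
    text: str


class Summary(BaseModel):
    headline: str


def output_as_text(finish: AgentFinish) -> str:
    # Structured outputs are serialized to JSON so they can be shown to a human.
    if isinstance(finish.output, str):
        return finish.output
    return finish.output.model_dump_json()


print(output_as_text(AgentFinish(thought="", output="plain answer", text="plain answer")))
print(output_as_text(AgentFinish(thought="", output=Summary(headline="Done"), text="{}")))
```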
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.9.2"
"crewai[tools]==1.9.0"
]

[project.scripts]

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.9.2"
"crewai[tools]==1.9.0"
]

[project.scripts]
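Both project templates above pin the `crewai[tools]` extra to an exact release, matching the `__version__` bump earlier in this diff. A small runtime check, assuming the package is installed under the distribution name `crewai`:

```python
import crewai

# The templates pin an exact release; __version__ reports what is actually installed.
print(crewai.__version__)
```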
@@ -1,3 +1,7 @@
from typing import Annotated

from pydantic import Field

from crewai.events.types.a2a_events import (
A2AAgentCardFetchedEvent,
A2AArtifactReceivedEvent,

@@ -102,7 +106,7 @@ from crewai.events.types.tool_usage_events import (
)

EventTypes = (
EventTypes = Annotated[
A2AAgentCardFetchedEvent
| A2AArtifactReceivedEvent
| A2AAuthenticationFailedEvent

@@ -180,5 +184,6 @@ EventTypes = (
| MCPConnectionFailedEvent
| MCPToolExecutionStartedEvent
| MCPToolExecutionCompletedEvent
| MCPToolExecutionFailedEvent
)
| MCPToolExecutionFailedEvent,
Field(discriminator="type"),
]
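One side of the `EventTypes` hunk wraps the event union in `Annotated[..., Field(discriminator="type")]`, which is Pydantic's tagged-union mechanism: each member declares a `Literal` `type` field, and the right class is chosen by that tag during validation. A minimal sketch of the pattern with two stand-in events (not crewAI's real classes):

```python
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter


class DelegationStarted(BaseModel):
    type: Literal["a2a_delegation_started"] = "a2a_delegation_started"
    endpoint: str


class DelegationCompleted(BaseModel):
    type: Literal["a2a_delegation_completed"] = "a2a_delegation_completed"
    status: str


Event = Annotated[
    Union[DelegationStarted, DelegationCompleted],
    Field(discriminator="type"),
]

# The "type" tag selects the concrete event class during validation.
event = TypeAdapter(Event).validate_python(
    {"type": "a2a_delegation_completed", "status": "completed"}
)
print(type(event).__name__)  # DelegationCompleted
```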
@@ -73,7 +73,7 @@ class A2ADelegationStartedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_delegation_started"
type: Literal["a2a_delegation_started"] = "a2a_delegation_started"
endpoint: str
task_description: str
agent_id: str

@@ -106,7 +106,7 @@ class A2ADelegationCompletedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_delegation_completed"
type: Literal["a2a_delegation_completed"] = "a2a_delegation_completed"
status: str
result: str | None = None
error: str | None = None

@@ -140,7 +140,7 @@ class A2AConversationStartedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_conversation_started"
type: Literal["a2a_conversation_started"] = "a2a_conversation_started"
agent_id: str
endpoint: str
context_id: str | None = None

@@ -171,7 +171,7 @@ class A2AMessageSentEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_message_sent"
type: Literal["a2a_message_sent"] = "a2a_message_sent"
message: str
turn_number: int
context_id: str | None = None

@@ -203,7 +203,7 @@ class A2AResponseReceivedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_response_received"
type: Literal["a2a_response_received"] = "a2a_response_received"
response: str
turn_number: int
context_id: str | None = None

@@ -237,7 +237,7 @@ class A2AConversationCompletedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_conversation_completed"
type: Literal["a2a_conversation_completed"] = "a2a_conversation_completed"
status: Literal["completed", "failed"]
final_result: str | None = None
error: str | None = None

@@ -263,7 +263,7 @@ class A2APollingStartedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_polling_started"
type: Literal["a2a_polling_started"] = "a2a_polling_started"
task_id: str
context_id: str | None = None
polling_interval: float

@@ -286,7 +286,7 @@ class A2APollingStatusEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_polling_status"
type: Literal["a2a_polling_status"] = "a2a_polling_status"
task_id: str
context_id: str | None = None
state: str

@@ -309,7 +309,7 @@ class A2APushNotificationRegisteredEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_push_notification_registered"
type: Literal["a2a_push_notification_registered"] = "a2a_push_notification_registered"
task_id: str
context_id: str | None = None
callback_url: str

@@ -334,7 +334,7 @@ class A2APushNotificationReceivedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_push_notification_received"
type: Literal["a2a_push_notification_received"] = "a2a_push_notification_received"
task_id: str
context_id: str | None = None
state: str

@@ -359,7 +359,7 @@ class A2APushNotificationSentEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_push_notification_sent"
type: Literal["a2a_push_notification_sent"] = "a2a_push_notification_sent"
task_id: str
context_id: str | None = None
callback_url: str

@@ -381,7 +381,7 @@ class A2APushNotificationTimeoutEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_push_notification_timeout"
type: Literal["a2a_push_notification_timeout"] = "a2a_push_notification_timeout"
task_id: str
context_id: str | None = None
timeout_seconds: float

@@ -405,7 +405,7 @@ class A2AStreamingStartedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_streaming_started"
type: Literal["a2a_streaming_started"] = "a2a_streaming_started"
task_id: str | None = None
context_id: str | None = None
endpoint: str

@@ -434,7 +434,7 @@ class A2AStreamingChunkEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_streaming_chunk"
type: Literal["a2a_streaming_chunk"] = "a2a_streaming_chunk"
task_id: str | None = None
context_id: str | None = None
chunk: str

@@ -462,7 +462,7 @@ class A2AAgentCardFetchedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_agent_card_fetched"
type: Literal["a2a_agent_card_fetched"] = "a2a_agent_card_fetched"
endpoint: str
a2a_agent_name: str | None = None
agent_card: dict[str, Any] | None = None

@@ -486,7 +486,7 @@ class A2AAuthenticationFailedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_authentication_failed"
type: Literal["a2a_authentication_failed"] = "a2a_authentication_failed"
endpoint: str
auth_type: str | None = None
error: str

@@ -517,7 +517,7 @@ class A2AArtifactReceivedEvent(A2AEventBase):
extensions: List of A2A extension URIs in use.
"""

type: str = "a2a_artifact_received"
type: Literal["a2a_artifact_received"] = "a2a_artifact_received"
task_id: str
artifact_id: str
artifact_name: str | None = None

@@ -550,7 +550,7 @@ class A2AConnectionErrorEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_connection_error"
type: Literal["a2a_connection_error"] = "a2a_connection_error"
endpoint: str
error: str
error_type: str | None = None

@@ -571,7 +571,7 @@ class A2AServerTaskStartedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_server_task_started"
type: Literal["a2a_server_task_started"] = "a2a_server_task_started"
task_id: str
context_id: str
metadata: dict[str, Any] | None = None

@@ -587,7 +587,7 @@ class A2AServerTaskCompletedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_server_task_completed"
type: Literal["a2a_server_task_completed"] = "a2a_server_task_completed"
task_id: str
context_id: str
result: str

@@ -603,7 +603,7 @@ class A2AServerTaskCanceledEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_server_task_canceled"
type: Literal["a2a_server_task_canceled"] = "a2a_server_task_canceled"
task_id: str
context_id: str
metadata: dict[str, Any] | None = None

@@ -619,7 +619,7 @@ class A2AServerTaskFailedEvent(A2AEventBase):
metadata: Custom A2A metadata key-value pairs.
"""

type: str = "a2a_server_task_failed"
type: Literal["a2a_server_task_failed"] = "a2a_server_task_failed"
task_id: str
context_id: str
error: str

@@ -634,7 +634,7 @@ class A2AParallelDelegationStartedEvent(A2AEventBase):
task_description: Description of the task being delegated.
"""

type: str = "a2a_parallel_delegation_started"
type: Literal["a2a_parallel_delegation_started"] = "a2a_parallel_delegation_started"
endpoints: list[str]
task_description: str

@@ -649,7 +649,7 @@ class A2AParallelDelegationCompletedEvent(A2AEventBase):
results: Summary of results from each agent.
"""

type: str = "a2a_parallel_delegation_completed"
type: Literal["a2a_parallel_delegation_completed"] = "a2a_parallel_delegation_completed"
endpoints: list[str]
success_count: int
failure_count: int

@@ -2,8 +2,7 @@

from __future__ import annotations

from collections.abc import Sequence
from typing import Any
from typing import Any, Literal

from pydantic import ConfigDict, model_validator

@@ -18,9 +17,9 @@ class AgentExecutionStartedEvent(BaseEvent):

agent: BaseAgent
task: Any
tools: Sequence[BaseTool | CrewStructuredTool] | None
tools: list[BaseTool | CrewStructuredTool] | None
task_prompt: str
type: str = "agent_execution_started"
type: Literal["agent_execution_started"] = "agent_execution_started"

model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -44,7 +43,7 @@ class AgentExecutionCompletedEvent(BaseEvent):
agent: BaseAgent
task: Any
output: str
type: str = "agent_execution_completed"
type: Literal["agent_execution_completed"] = "agent_execution_completed"

model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -68,7 +67,7 @@ class AgentExecutionErrorEvent(BaseEvent):
agent: BaseAgent
task: Any
error: str
type: str = "agent_execution_error"
type: Literal["agent_execution_error"] = "agent_execution_error"

model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -91,9 +90,9 @@ class LiteAgentExecutionStartedEvent(BaseEvent):
"""Event emitted when a LiteAgent starts executing"""

agent_info: dict[str, Any]
tools: Sequence[BaseTool | CrewStructuredTool] | None
tools: list[BaseTool | CrewStructuredTool] | None
messages: str | list[dict[str, str]]
type: str = "lite_agent_execution_started"
type: Literal["lite_agent_execution_started"] = "lite_agent_execution_started"

model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -103,7 +102,7 @@ class LiteAgentExecutionCompletedEvent(BaseEvent):

agent_info: dict[str, Any]
output: str
type: str = "lite_agent_execution_completed"
type: Literal["lite_agent_execution_completed"] = "lite_agent_execution_completed"


class LiteAgentExecutionErrorEvent(BaseEvent):

@@ -111,7 +110,7 @@ class LiteAgentExecutionErrorEvent(BaseEvent):

agent_info: dict[str, Any]
error: str
type: str = "lite_agent_execution_error"
type: Literal["lite_agent_execution_error"] = "lite_agent_execution_error"


# Agent Eval events

@@ -120,7 +119,7 @@ class AgentEvaluationStartedEvent(BaseEvent):
agent_role: str
task_id: str | None = None
iteration: int
type: str = "agent_evaluation_started"
type: Literal["agent_evaluation_started"] = "agent_evaluation_started"


class AgentEvaluationCompletedEvent(BaseEvent):

@@ -130,7 +129,7 @@ class AgentEvaluationCompletedEvent(BaseEvent):
iteration: int
metric_category: Any
score: Any
type: str = "agent_evaluation_completed"
type: Literal["agent_evaluation_completed"] = "agent_evaluation_completed"


class AgentEvaluationFailedEvent(BaseEvent):

@@ -139,4 +138,4 @@ class AgentEvaluationFailedEvent(BaseEvent):
task_id: str | None = None
iteration: int
error: str
type: str = "agent_evaluation_failed"
type: Literal["agent_evaluation_failed"] = "agent_evaluation_failed"
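The agent-event hunks above switch the `tools` annotation between `list[...]` and `Sequence[...]`. The practical difference: `Sequence` accepts any read-only ordered container (tuples included), while `list` demands a concrete mutable list. A small illustration, independent of crewAI's own tool types:

```python
from collections.abc import Sequence


def count_tools_list(tools: list[str] | None) -> int:
    return len(tools or [])


def count_tools_seq(tools: Sequence[str] | None) -> int:
    return len(tools or ())


# A type checker accepts a tuple only for the Sequence-annotated parameter;
# at runtime both calls work because annotations are not enforced.
print(count_tools_seq(("search", "write_file")))
print(count_tools_list(["search", "write_file"]))
```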
@@ -1,4 +1,4 @@
from typing import TYPE_CHECKING, Any
from typing import TYPE_CHECKING, Any, Literal

from crewai.events.base_events import BaseEvent

@@ -40,14 +40,14 @@ class CrewKickoffStartedEvent(CrewBaseEvent):
"""Event emitted when a crew starts execution"""

inputs: dict[str, Any] | None
type: str = "crew_kickoff_started"
type: Literal["crew_kickoff_started"] = "crew_kickoff_started"


class CrewKickoffCompletedEvent(CrewBaseEvent):
"""Event emitted when a crew completes execution"""

output: Any
type: str = "crew_kickoff_completed"
type: Literal["crew_kickoff_completed"] = "crew_kickoff_completed"
total_tokens: int = 0


@@ -55,7 +55,7 @@ class CrewKickoffFailedEvent(CrewBaseEvent):
"""Event emitted when a crew fails to complete execution"""

error: str
type: str = "crew_kickoff_failed"
type: Literal["crew_kickoff_failed"] = "crew_kickoff_failed"


class CrewTrainStartedEvent(CrewBaseEvent):

@@ -64,7 +64,7 @@ class CrewTrainStartedEvent(CrewBaseEvent):
n_iterations: int
filename: str
inputs: dict[str, Any] | None
type: str = "crew_train_started"
type: Literal["crew_train_started"] = "crew_train_started"


class CrewTrainCompletedEvent(CrewBaseEvent):

@@ -72,14 +72,14 @@ class CrewTrainCompletedEvent(CrewBaseEvent):

n_iterations: int
filename: str
type: str = "crew_train_completed"
type: Literal["crew_train_completed"] = "crew_train_completed"


class CrewTrainFailedEvent(CrewBaseEvent):
"""Event emitted when a crew fails to complete training"""

error: str
type: str = "crew_train_failed"
type: Literal["crew_train_failed"] = "crew_train_failed"


class CrewTestStartedEvent(CrewBaseEvent):

@@ -88,20 +88,20 @@ class CrewTestStartedEvent(CrewBaseEvent):
n_iterations: int
eval_llm: str | Any | None
inputs: dict[str, Any] | None
type: str = "crew_test_started"
type: Literal["crew_test_started"] = "crew_test_started"


class CrewTestCompletedEvent(CrewBaseEvent):
"""Event emitted when a crew completes testing"""

type: str = "crew_test_completed"
type: Literal["crew_test_completed"] = "crew_test_completed"


class CrewTestFailedEvent(CrewBaseEvent):
"""Event emitted when a crew fails to complete testing"""

error: str
type: str = "crew_test_failed"
type: Literal["crew_test_failed"] = "crew_test_failed"


class CrewTestResultEvent(CrewBaseEvent):

@@ -110,4 +110,4 @@ class CrewTestResultEvent(CrewBaseEvent):
quality: float
execution_duration: float
model: str
type: str = "crew_test_result"
type: Literal["crew_test_result"] = "crew_test_result"

@@ -1,4 +1,4 @@
from typing import Any
from typing import Any, Literal

from pydantic import BaseModel, ConfigDict

@@ -17,14 +17,14 @@ class FlowStartedEvent(FlowEvent):

flow_name: str
inputs: dict[str, Any] | None = None
type: str = "flow_started"
type: Literal["flow_started"] = "flow_started"


class FlowCreatedEvent(FlowEvent):
"""Event emitted when a flow is created"""

flow_name: str
type: str = "flow_created"
type: Literal["flow_created"] = "flow_created"


class MethodExecutionStartedEvent(FlowEvent):

@@ -34,7 +34,7 @@ class MethodExecutionStartedEvent(FlowEvent):
method_name: str
state: dict[str, Any] | BaseModel
params: dict[str, Any] | None = None
type: str = "method_execution_started"
type: Literal["method_execution_started"] = "method_execution_started"


class MethodExecutionFinishedEvent(FlowEvent):

@@ -44,7 +44,7 @@ class MethodExecutionFinishedEvent(FlowEvent):
method_name: str
result: Any = None
state: dict[str, Any] | BaseModel
type: str = "method_execution_finished"
type: Literal["method_execution_finished"] = "method_execution_finished"


class MethodExecutionFailedEvent(FlowEvent):

@@ -53,7 +53,7 @@ class MethodExecutionFailedEvent(FlowEvent):
flow_name: str
method_name: str
error: Exception
type: str = "method_execution_failed"
type: Literal["method_execution_failed"] = "method_execution_failed"

model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -78,7 +78,7 @@ class MethodExecutionPausedEvent(FlowEvent):
flow_id: str
message: str
emit: list[str] | None = None
type: str = "method_execution_paused"
type: Literal["method_execution_paused"] = "method_execution_paused"


class FlowFinishedEvent(FlowEvent):

@@ -86,7 +86,7 @@ class FlowFinishedEvent(FlowEvent):

flow_name: str
result: Any | None = None
type: str = "flow_finished"
type: Literal["flow_finished"] = "flow_finished"
state: dict[str, Any] | BaseModel


@@ -110,14 +110,14 @@ class FlowPausedEvent(FlowEvent):
state: dict[str, Any] | BaseModel
message: str
emit: list[str] | None = None
type: str = "flow_paused"
type: Literal["flow_paused"] = "flow_paused"


class FlowPlotEvent(FlowEvent):
"""Event emitted when a flow plot is created"""

flow_name: str
type: str = "flow_plot"
type: Literal["flow_plot"] = "flow_plot"


class HumanFeedbackRequestedEvent(FlowEvent):

@@ -138,7 +138,7 @@ class HumanFeedbackRequestedEvent(FlowEvent):
output: Any
message: str
emit: list[str] | None = None
type: str = "human_feedback_requested"
type: Literal["human_feedback_requested"] = "human_feedback_requested"


class HumanFeedbackReceivedEvent(FlowEvent):

@@ -157,4 +157,4 @@ class HumanFeedbackReceivedEvent(FlowEvent):
method_name: str
feedback: str
outcome: str | None = None
type: str = "human_feedback_received"
type: Literal["human_feedback_received"] = "human_feedback_received"
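Each event in the hunks above changes `type` from a plain `str` default to a `Literal[...]` default. With `Literal`, Pydantic both keeps the default fixed and rejects any attempt to set the tag to a different value, which is what makes the discriminated `EventTypes` union shown earlier reliable. A short sketch with a stand-in event class:

```python
from typing import Literal

from pydantic import BaseModel, ValidationError


class FlowStartedEvent(BaseModel):
    flow_name: str
    type: Literal["flow_started"] = "flow_started"


print(FlowStartedEvent(flow_name="demo").type)  # "flow_started", no need to pass it

try:
    FlowStartedEvent(flow_name="demo", type="something_else")
except ValidationError as exc:
    # A str-typed field would silently accept the wrong tag; Literal does not.
    print("rejected:", exc.error_count(), "error")
```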
@@ -1,4 +1,4 @@
from typing import Any
from typing import Any, Literal

from crewai.events.base_events import BaseEvent

@@ -20,14 +20,14 @@ class KnowledgeEventBase(BaseEvent):
class KnowledgeRetrievalStartedEvent(KnowledgeEventBase):
"""Event emitted when a knowledge retrieval is started."""

type: str = "knowledge_search_query_started"
type: Literal["knowledge_search_query_started"] = "knowledge_search_query_started"


class KnowledgeRetrievalCompletedEvent(KnowledgeEventBase):
"""Event emitted when a knowledge retrieval is completed."""

query: str
type: str = "knowledge_search_query_completed"
type: Literal["knowledge_search_query_completed"] = "knowledge_search_query_completed"
retrieved_knowledge: str


@@ -35,13 +35,13 @@ class KnowledgeQueryStartedEvent(KnowledgeEventBase):
"""Event emitted when a knowledge query is started."""

task_prompt: str
type: str = "knowledge_query_started"
type: Literal["knowledge_query_started"] = "knowledge_query_started"


class KnowledgeQueryFailedEvent(KnowledgeEventBase):
"""Event emitted when a knowledge query fails."""

type: str = "knowledge_query_failed"
type: Literal["knowledge_query_failed"] = "knowledge_query_failed"
error: str


@@ -49,12 +49,12 @@ class KnowledgeQueryCompletedEvent(KnowledgeEventBase):
"""Event emitted when a knowledge query is completed."""

query: str
type: str = "knowledge_query_completed"
type: Literal["knowledge_query_completed"] = "knowledge_query_completed"


class KnowledgeSearchQueryFailedEvent(KnowledgeEventBase):
"""Event emitted when a knowledge search query fails."""

query: str
type: str = "knowledge_search_query_failed"
type: Literal["knowledge_search_query_failed"] = "knowledge_search_query_failed"
error: str

@@ -1,5 +1,5 @@
from enum import Enum
from typing import Any
from typing import Any, Literal

from pydantic import BaseModel

@@ -42,7 +42,7 @@ class LLMCallStartedEvent(LLMEventBase):
multimodal content (text, images, etc.)
"""

type: str = "llm_call_started"
type: Literal["llm_call_started"] = "llm_call_started"
messages: str | list[dict[str, Any]] | None = None
tools: list[dict[str, Any]] | None = None
callbacks: list[Any] | None = None

@@ -52,7 +52,7 @@ class LLMCallStartedEvent(LLMEventBase):
class LLMCallCompletedEvent(LLMEventBase):
"""Event emitted when a LLM call completes"""

type: str = "llm_call_completed"
type: Literal["llm_call_completed"] = "llm_call_completed"
messages: str | list[dict[str, Any]] | None = None
response: Any
call_type: LLMCallType

@@ -62,7 +62,7 @@ class LLMCallFailedEvent(LLMEventBase):
"""Event emitted when a LLM call fails"""

error: str
type: str = "llm_call_failed"
type: Literal["llm_call_failed"] = "llm_call_failed"


class FunctionCall(BaseModel):

@@ -80,7 +80,7 @@ class ToolCall(BaseModel):
class LLMStreamChunkEvent(LLMEventBase):
"""Event emitted when a streaming chunk is received"""

type: str = "llm_stream_chunk"
type: Literal["llm_stream_chunk"] = "llm_stream_chunk"
chunk: str
tool_call: ToolCall | None = None
call_type: LLMCallType | None = None

@@ -1,6 +1,6 @@
from collections.abc import Callable
from inspect import getsource
from typing import Any
from typing import Any, Literal

from crewai.events.base_events import BaseEvent

@@ -27,7 +27,7 @@ class LLMGuardrailStartedEvent(LLMGuardrailBaseEvent):
retry_count: The number of times the guardrail has been retried
"""

type: str = "llm_guardrail_started"
type: Literal["llm_guardrail_started"] = "llm_guardrail_started"
guardrail: str | Callable
retry_count: int

@@ -53,7 +53,7 @@ class LLMGuardrailCompletedEvent(LLMGuardrailBaseEvent):
retry_count: The number of times the guardrail has been retried
"""

type: str = "llm_guardrail_completed"
type: Literal["llm_guardrail_completed"] = "llm_guardrail_completed"
success: bool
result: Any
error: str | None = None

@@ -68,6 +68,6 @@ class LLMGuardrailFailedEvent(LLMGuardrailBaseEvent):
retry_count: The number of times the guardrail has been retried
"""

type: str = "llm_guardrail_failed"
type: Literal["llm_guardrail_failed"] = "llm_guardrail_failed"
error: str
retry_count: int

@@ -1,6 +1,6 @@
"""Agent logging events that don't reference BaseAgent to avoid circular imports."""

from typing import Any
from typing import Any, Literal

from pydantic import ConfigDict

@@ -13,7 +13,7 @@ class AgentLogsStartedEvent(BaseEvent):
agent_role: str
task_description: str | None = None
verbose: bool = False
type: str = "agent_logs_started"
type: Literal["agent_logs_started"] = "agent_logs_started"


class AgentLogsExecutionEvent(BaseEvent):

@@ -22,6 +22,6 @@ class AgentLogsExecutionEvent(BaseEvent):
agent_role: str
formatted_answer: Any
verbose: bool = False
type: str = "agent_logs_execution"
type: Literal["agent_logs_execution"] = "agent_logs_execution"

model_config = ConfigDict(arbitrary_types_allowed=True)

@@ -1,5 +1,5 @@
from datetime import datetime
from typing import Any
from typing import Any, Literal

from crewai.events.base_events import BaseEvent

@@ -24,7 +24,7 @@ class MCPEvent(BaseEvent):
class MCPConnectionStartedEvent(MCPEvent):
"""Event emitted when starting to connect to an MCP server."""

type: str = "mcp_connection_started"
type: Literal["mcp_connection_started"] = "mcp_connection_started"
connect_timeout: int | None = None
is_reconnect: bool = (
False  # True if this is a reconnection, False for first connection

@@ -34,7 +34,7 @@ class MCPConnectionStartedEvent(MCPEvent):
class MCPConnectionCompletedEvent(MCPEvent):
"""Event emitted when successfully connected to an MCP server."""

type: str = "mcp_connection_completed"
type: Literal["mcp_connection_completed"] = "mcp_connection_completed"
started_at: datetime | None = None
completed_at: datetime | None = None
connection_duration_ms: float | None = None

@@ -46,7 +46,7 @@ class MCPConnectionCompletedEvent(MCPEvent):
class MCPConnectionFailedEvent(MCPEvent):
"""Event emitted when connection to an MCP server fails."""

type: str = "mcp_connection_failed"
type: Literal["mcp_connection_failed"] = "mcp_connection_failed"
error: str
error_type: str | None = None  # "timeout", "authentication", "network", etc.
started_at: datetime | None = None

@@ -56,7 +56,7 @@ class MCPConnectionFailedEvent(MCPEvent):
class MCPToolExecutionStartedEvent(MCPEvent):
"""Event emitted when starting to execute an MCP tool."""

type: str = "mcp_tool_execution_started"
type: Literal["mcp_tool_execution_started"] = "mcp_tool_execution_started"
tool_name: str
tool_args: dict[str, Any] | None = None

@@ -64,7 +64,7 @@ class MCPToolExecutionStartedEvent(MCPEvent):
class MCPToolExecutionCompletedEvent(MCPEvent):
"""Event emitted when MCP tool execution completes."""

type: str = "mcp_tool_execution_completed"
type: Literal["mcp_tool_execution_completed"] = "mcp_tool_execution_completed"
tool_name: str
tool_args: dict[str, Any] | None = None
result: Any | None = None

@@ -76,7 +76,7 @@ class MCPToolExecutionCompletedEvent(MCPEvent):
class MCPToolExecutionFailedEvent(MCPEvent):
"""Event emitted when MCP tool execution fails."""

type: str = "mcp_tool_execution_failed"
type: Literal["mcp_tool_execution_failed"] = "mcp_tool_execution_failed"
tool_name: str
tool_args: dict[str, Any] | None = None
error: str

@@ -1,4 +1,4 @@
from typing import Any
from typing import Any, Literal

from crewai.events.base_events import BaseEvent

@@ -23,7 +23,7 @@ class MemoryBaseEvent(BaseEvent):
class MemoryQueryStartedEvent(MemoryBaseEvent):
"""Event emitted when a memory query is started"""

type: str = "memory_query_started"
type: Literal["memory_query_started"] = "memory_query_started"
query: str
limit: int
score_threshold: float | None = None

@@ -32,7 +32,7 @@ class MemoryQueryStartedEvent(MemoryBaseEvent):
class MemoryQueryCompletedEvent(MemoryBaseEvent):
"""Event emitted when a memory query is completed successfully"""

type: str = "memory_query_completed"
type: Literal["memory_query_completed"] = "memory_query_completed"
query: str
results: Any
limit: int

@@ -43,7 +43,7 @@ class MemoryQueryCompletedEvent(MemoryBaseEvent):
class MemoryQueryFailedEvent(MemoryBaseEvent):
"""Event emitted when a memory query fails"""

type: str = "memory_query_failed"
type: Literal["memory_query_failed"] = "memory_query_failed"
query: str
limit: int
score_threshold: float | None = None

@@ -53,7 +53,7 @@ class MemoryQueryFailedEvent(MemoryBaseEvent):
class MemorySaveStartedEvent(MemoryBaseEvent):
"""Event emitted when a memory save operation is started"""

type: str = "memory_save_started"
type: Literal["memory_save_started"] = "memory_save_started"
value: str | None = None
metadata: dict[str, Any] | None = None
agent_role: str | None = None

@@ -62,7 +62,7 @@ class MemorySaveStartedEvent(MemoryBaseEvent):
class MemorySaveCompletedEvent(MemoryBaseEvent):
"""Event emitted when a memory save operation is completed successfully"""

type: str = "memory_save_completed"
type: Literal["memory_save_completed"] = "memory_save_completed"
value: str
metadata: dict[str, Any] | None = None
agent_role: str | None = None

@@ -72,7 +72,7 @@ class MemorySaveCompletedEvent(MemoryBaseEvent):
class MemorySaveFailedEvent(MemoryBaseEvent):
"""Event emitted when a memory save operation fails"""

type: str = "memory_save_failed"
type: Literal["memory_save_failed"] = "memory_save_failed"
value: str | None = None
metadata: dict[str, Any] | None = None
agent_role: str | None = None

@@ -82,14 +82,14 @@ class MemorySaveFailedEvent(MemoryBaseEvent):
class MemoryRetrievalStartedEvent(MemoryBaseEvent):
"""Event emitted when memory retrieval for a task prompt starts"""

type: str = "memory_retrieval_started"
type: Literal["memory_retrieval_started"] = "memory_retrieval_started"
task_id: str | None = None


class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
"""Event emitted when memory retrieval for a task prompt completes successfully"""

type: str = "memory_retrieval_completed"
type: Literal["memory_retrieval_completed"] = "memory_retrieval_completed"
task_id: str | None = None
memory_content: str
retrieval_time_ms: float

@@ -98,6 +98,6 @@ class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
class MemoryRetrievalFailedEvent(MemoryBaseEvent):
"""Event emitted when memory retrieval for a task prompt fails."""

type: str = "memory_retrieval_failed"
type: Literal["memory_retrieval_failed"] = "memory_retrieval_failed"
task_id: str | None = None
error: str

@@ -1,4 +1,4 @@
from typing import Any
from typing import Any, Literal

from crewai.events.base_events import BaseEvent

@@ -24,7 +24,7 @@ class ReasoningEvent(BaseEvent):
class AgentReasoningStartedEvent(ReasoningEvent):
"""Event emitted when an agent starts reasoning about a task."""

type: str = "agent_reasoning_started"
type: Literal["agent_reasoning_started"] = "agent_reasoning_started"
agent_role: str
task_id: str

@@ -32,7 +32,7 @@ class AgentReasoningStartedEvent(ReasoningEvent):
class AgentReasoningCompletedEvent(ReasoningEvent):
"""Event emitted when an agent finishes its reasoning process."""

type: str = "agent_reasoning_completed"
type: Literal["agent_reasoning_completed"] = "agent_reasoning_completed"
agent_role: str
task_id: str
plan: str

@@ -42,7 +42,7 @@ class AgentReasoningCompletedEvent(ReasoningEvent):
class AgentReasoningFailedEvent(ReasoningEvent):
"""Event emitted when the reasoning process fails."""

type: str = "agent_reasoning_failed"
type: Literal["agent_reasoning_failed"] = "agent_reasoning_failed"
agent_role: str
task_id: str
error: str

@@ -1,4 +1,4 @@
from typing import Any
from typing import Any, Literal

from crewai.events.base_events import BaseEvent
from crewai.tasks.task_output import TaskOutput

@@ -7,7 +7,7 @@ from crewai.tasks.task_output import TaskOutput
class TaskStartedEvent(BaseEvent):
"""Event emitted when a task starts"""

type: str = "task_started"
type: Literal["task_started"] = "task_started"
context: str | None
task: Any | None = None

@@ -28,7 +28,7 @@ class TaskCompletedEvent(BaseEvent):
"""Event emitted when a task completes"""

output: TaskOutput
type: str = "task_completed"
type: Literal["task_completed"] = "task_completed"
task: Any | None = None

def __init__(self, **data):

@@ -48,7 +48,7 @@ class TaskFailedEvent(BaseEvent):
"""Event emitted when a task fails"""

error: str
type: str = "task_failed"
type: Literal["task_failed"] = "task_failed"
task: Any | None = None

def __init__(self, **data):

@@ -67,7 +67,7 @@ class TaskFailedEvent(BaseEvent):
class TaskEvaluationEvent(BaseEvent):
"""Event emitted when a task evaluation is completed"""

type: str = "task_evaluation"
type: Literal["task_evaluation"] = "task_evaluation"
evaluation_type: str
task: Any | None = None

@@ -1,6 +1,6 @@
from collections.abc import Callable
from datetime import datetime
from typing import Any
from typing import Any, Literal

from pydantic import ConfigDict

@@ -55,7 +55,7 @@ class ToolUsageEvent(BaseEvent):
class ToolUsageStartedEvent(ToolUsageEvent):
"""Event emitted when a tool execution is started"""

type: str = "tool_usage_started"
type: Literal["tool_usage_started"] = "tool_usage_started"


class ToolUsageFinishedEvent(ToolUsageEvent):

@@ -65,35 +65,35 @@ class ToolUsageFinishedEvent(ToolUsageEvent):
finished_at: datetime
from_cache: bool = False
output: Any
type: str = "tool_usage_finished"
type: Literal["tool_usage_finished"] = "tool_usage_finished"


class ToolUsageErrorEvent(ToolUsageEvent):
"""Event emitted when a tool execution encounters an error"""

error: Any
type: str = "tool_usage_error"
type: Literal["tool_usage_error"] = "tool_usage_error"


class ToolValidateInputErrorEvent(ToolUsageEvent):
"""Event emitted when a tool input validation encounters an error"""

error: Any
type: str = "tool_validate_input_error"
type: Literal["tool_validate_input_error"] = "tool_validate_input_error"


class ToolSelectionErrorEvent(ToolUsageEvent):
"""Event emitted when a tool selection encounters an error"""

error: Any
type: str = "tool_selection_error"
type: Literal["tool_selection_error"] = "tool_selection_error"


class ToolExecutionErrorEvent(BaseEvent):
"""Event emitted when a tool execution encounters an error"""

error: Any
type: str = "tool_execution_error"
type: Literal["tool_execution_error"] = "tool_execution_error"
tool_name: str
tool_args: dict[str, Any]
tool_class: Callable
@@ -341,7 +341,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
messages=list(self.state.messages),
llm=self.llm,
callbacks=self.callbacks,
verbose=self.agent.verbose,
)

self.state.current_answer = formatted_answer

@@ -367,7 +366,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=None,
executor_context=self,
verbose=self.agent.verbose,
)

# Parse the LLM response

@@ -403,7 +401,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
return "context_error"
if e.__class__.__module__.startswith("litellm"):
raise e
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise

@listen("continue_reasoning_native")

@@ -438,7 +436,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
from_agent=self.agent,
response_model=None,
executor_context=self,
verbose=self.agent.verbose,
)

# Check if the response is a list of tool calls

@@ -477,7 +474,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
return "context_error"
if e.__class__.__module__.startswith("litellm"):
raise e
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(self._printer, e)
raise

@router(call_llm_and_parse)

@@ -673,10 +670,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):

track_delegation_if_needed(func_name, args_dict, self.task)

structured_tool: CrewStructuredTool | None = None
for structured in self.tools or []:
if sanitize_tool_name(structured.name) == func_name:
structured_tool = structured
structured_tool = None
for tool in self.tools or []:
if sanitize_tool_name(tool.name) == func_name:
structured_tool = tool
break

hook_blocked = False

@@ -696,11 +693,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
hook_blocked = True
break
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)
self._printer.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)

if hook_blocked:
result = f"Tool execution blocked by hook. Tool: {func_name}"

@@ -762,16 +758,15 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
after_hooks = get_after_tool_call_hooks()
try:
for after_hook in after_hooks:
after_hook_result = after_hook(after_hook_context)
if after_hook_result is not None:
result = after_hook_result
hook_result = after_hook(after_hook_context)
if hook_result is not None:
result = hook_result
after_hook_context.tool_result = result
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)
self._printer.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)

# Emit tool usage finished event
crewai_event_bus.emit(

@@ -819,6 +814,15 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
self.state.is_finished = True
return "tool_result_is_final"

# Add reflection prompt once after all tools in the batch
reasoning_prompt = self._i18n.slice("post_tool_reasoning")

reasoning_message: LLMMessage = {
"role": "user",
"content": reasoning_prompt,
}
self.state.messages.append(reasoning_message)

return "native_tool_completed"

def _extract_tool_name(self, tool_call: Any) -> str:

@@ -907,7 +911,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
iterations=self.state.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
if formatted_answer:
|
||||
@@ -927,7 +930,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
llm=self.llm,
|
||||
callbacks=self.callbacks,
|
||||
i18n=self._i18n,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
self.state.iterations += 1
|
||||
@@ -1019,7 +1021,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
self._console.print(fail_text)
|
||||
raise
|
||||
except Exception as e:
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise
|
||||
finally:
|
||||
self._is_executing = False
|
||||
@@ -1104,7 +1106,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
self._console.print(fail_text)
|
||||
raise
|
||||
except Exception as e:
|
||||
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise
|
||||
finally:
|
||||
self._is_executing = False
|
||||
|
||||
@@ -7,7 +7,7 @@ for building event-driven workflows with conditional execution and routing.
from __future__ import annotations

import asyncio
from collections.abc import Callable, Sequence
from collections.abc import Callable
from concurrent.futures import Future
import copy
import inspect
@@ -2382,7 +2382,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        message: str,
        output: Any,
        metadata: dict[str, Any] | None = None,
        emit: Sequence[str] | None = None,
        emit: list[str] | None = None,
    ) -> str:
        """Request feedback from a human.
        Args:
@@ -2453,7 +2453,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
    def _collapse_to_outcome(
        self,
        feedback: str,
        outcomes: Sequence[str],
        outcomes: list[str],
        llm: str | BaseLLM,
    ) -> str:
        """Collapse free-form feedback to a predefined outcome using LLM.

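These hunks swap between `Sequence[str]` and `list[str]` for the `emit` and `outcomes` parameters. The difference is only in what callers may pass: `Sequence[str]` also admits tuples and other read-only containers, while `list[str]` requires an actual list. A small illustrative sketch (hypothetical function names, not crewAI APIs):

    from collections.abc import Sequence


    def collapse(outcomes: Sequence[str]) -> str:
        # Accepts any ordered container of strings: list, tuple, etc.
        return outcomes[0]


    def collapse_list_only(outcomes: list[str]) -> str:
        # With the narrower annotation, a tuple such as
        # ("approve", "reject") no longer type-checks.
        return outcomes[0]


    print(collapse(("approve", "reject")))
    print(collapse_list_only(["approve", "reject"]))
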
@@ -53,7 +53,7 @@ Example (asynchronous with custom provider):
from __future__ import annotations

import asyncio
from collections.abc import Callable, Sequence
from collections.abc import Callable
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
@@ -128,7 +128,7 @@ class HumanFeedbackConfig:
    """

    message: str
    emit: Sequence[str] | None = None
    emit: list[str] | None = None
    llm: str | BaseLLM | None = None
    default_outcome: str | None = None
    metadata: dict[str, Any] | None = None
@@ -154,7 +154,7 @@ class HumanFeedbackMethod(FlowMethod[Any, Any]):

def human_feedback(
    message: str,
    emit: Sequence[str] | None = None,
    emit: list[str] | None = None,
    llm: str | BaseLLM | None = None,
    default_outcome: str | None = None,
    metadata: dict[str, Any] | None = None,

@@ -118,20 +118,17 @@ class PersistenceDecorator:
                )
            except Exception as e:
                error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
                if verbose:
                    cls._printer.print(error_msg, color="red")
                cls._printer.print(error_msg, color="red")
                logger.error(error_msg)
                raise RuntimeError(f"State persistence failed: {e!s}") from e
            except AttributeError as e:
                error_msg = LOG_MESSAGES["state_missing"]
                if verbose:
                    cls._printer.print(error_msg, color="red")
                cls._printer.print(error_msg, color="red")
                logger.error(error_msg)
                raise ValueError(error_msg) from e
            except (TypeError, ValueError) as e:
                error_msg = LOG_MESSAGES["id_missing"]
                if verbose:
                    cls._printer.print(error_msg, color="red")
                cls._printer.print(error_msg, color="red")
                logger.error(error_msg)
                raise ValueError(error_msg) from e


@@ -151,9 +151,7 @@ def _unwrap_function(function: Any) -> Any:
|
||||
return function
|
||||
|
||||
|
||||
def get_possible_return_constants(
|
||||
function: Any, verbose: bool = True
|
||||
) -> list[str] | None:
|
||||
def get_possible_return_constants(function: Any) -> list[str] | None:
|
||||
"""Extract possible string return values from a function using AST parsing.
|
||||
|
||||
This function analyzes the source code of a router method to identify
|
||||
@@ -180,11 +178,10 @@ def get_possible_return_constants(
|
||||
# Can't get source code
|
||||
return None
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
_printer.print(
|
||||
f"Error retrieving source code for function {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(
|
||||
f"Error retrieving source code for function {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
return None
|
||||
|
||||
try:
|
||||
@@ -193,28 +190,25 @@ def get_possible_return_constants(
|
||||
# Parse the source code into an AST
|
||||
code_ast = ast.parse(source)
|
||||
except IndentationError as e:
|
||||
if verbose:
|
||||
_printer.print(
|
||||
f"IndentationError while parsing source code of {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(f"Source code:\n{source}", color="yellow")
|
||||
_printer.print(
|
||||
f"IndentationError while parsing source code of {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(f"Source code:\n{source}", color="yellow")
|
||||
return None
|
||||
except SyntaxError as e:
|
||||
if verbose:
|
||||
_printer.print(
|
||||
f"SyntaxError while parsing source code of {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(f"Source code:\n{source}", color="yellow")
|
||||
_printer.print(
|
||||
f"SyntaxError while parsing source code of {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(f"Source code:\n{source}", color="yellow")
|
||||
return None
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
_printer.print(
|
||||
f"Unexpected error while parsing source code of {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(f"Source code:\n{source}", color="yellow")
|
||||
_printer.print(
|
||||
f"Unexpected error while parsing source code of {function.__name__}: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(f"Source code:\n{source}", color="yellow")
|
||||
return None
|
||||
|
||||
return_values: set[str] = set()
|
||||
@@ -394,17 +388,15 @@ def get_possible_return_constants(
|
||||
|
||||
StateAttributeVisitor().visit(class_ast)
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
_printer.print(
|
||||
f"Could not analyze class context for {function.__name__}: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
_printer.print(
|
||||
f"Could not analyze class context for {function.__name__}: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
_printer.print(
|
||||
f"Could not introspect class for {function.__name__}: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
_printer.print(
|
||||
f"Could not introspect class for {function.__name__}: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
VariableAssignmentVisitor().visit(code_ast)
|
||||
ReturnVisitor().visit(code_ast)
|
||||
|
||||
@@ -72,13 +72,13 @@ from crewai.utilities.agent_utils import (
|
||||
from crewai.utilities.converter import (
|
||||
Converter,
|
||||
ConverterError,
|
||||
generate_model_description,
|
||||
)
|
||||
from crewai.utilities.guardrail import process_guardrail
|
||||
from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType
|
||||
from crewai.utilities.i18n import I18N, get_i18n
|
||||
from crewai.utilities.llm_utils import create_llm
|
||||
from crewai.utilities.printer import Printer
|
||||
from crewai.utilities.pydantic_schema_utils import generate_model_description
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
from crewai.utilities.tool_utils import execute_tool_and_check_finality
|
||||
from crewai.utilities.types import LLMMessage
|
||||
@@ -344,12 +344,11 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
if self.verbose:
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
handle_unknown_error(self._printer, e, verbose=self.verbose)
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
handle_unknown_error(self._printer, e)
|
||||
# Emit error event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -397,11 +396,10 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
if isinstance(result, BaseModel):
|
||||
formatted_result = result
|
||||
except ConverterError as e:
|
||||
if self.verbose:
|
||||
self._printer.print(
|
||||
content=f"Failed to parse output into response format after retries: {e.message}",
|
||||
color="yellow",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"Failed to parse output into response format after retries: {e.message}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
# Calculate token usage metrics
|
||||
if isinstance(self.llm, BaseLLM):
|
||||
@@ -607,7 +605,6 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
messages=self._messages,
|
||||
llm=cast(LLM, self.llm),
|
||||
callbacks=self._callbacks,
|
||||
verbose=self.verbose,
|
||||
)
|
||||
|
||||
enforce_rpm_limit(self.request_within_rpm_limit)
|
||||
@@ -620,7 +617,6 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
printer=self._printer,
|
||||
from_agent=self,
|
||||
executor_context=self,
|
||||
verbose=self.verbose,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -650,18 +646,16 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
self._append_message(formatted_answer.text, role="assistant")
|
||||
except OutputParserError as e: # noqa: PERF203
|
||||
if self.verbose:
|
||||
self._printer.print(
|
||||
content="Failed to parse LLM output. Retrying...",
|
||||
color="yellow",
|
||||
)
|
||||
self._printer.print(
|
||||
content="Failed to parse LLM output. Retrying...",
|
||||
color="yellow",
|
||||
)
|
||||
formatted_answer = handle_output_parser_exception(
|
||||
e=e,
|
||||
messages=self._messages,
|
||||
iterations=self._iterations,
|
||||
log_error_after=3,
|
||||
printer=self._printer,
|
||||
verbose=self.verbose,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -676,10 +670,9 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
llm=cast(LLM, self.llm),
|
||||
callbacks=self._callbacks,
|
||||
i18n=self.i18n,
|
||||
verbose=self.verbose,
|
||||
)
|
||||
continue
|
||||
handle_unknown_error(self._printer, e, verbose=self.verbose)
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
|
||||
finally:
|
||||
|
||||
@@ -497,7 +497,7 @@ class BaseLLM(ABC):
|
||||
from_agent=from_agent,
|
||||
)
|
||||
|
||||
return str(result) if not isinstance(result, str) else result
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Error executing function '{function_name}': {e!s}"
|
||||
@@ -620,13 +620,11 @@ class BaseLLM(ABC):
|
||||
try:
|
||||
# Try to parse as JSON first
|
||||
if response.strip().startswith("{") or response.strip().startswith("["):
|
||||
data = json.loads(response)
|
||||
return response_format.model_validate(data)
|
||||
return response_format.model_validate_json(response)
|
||||
|
||||
json_match = _JSON_EXTRACTION_PATTERN.search(response)
|
||||
if json_match:
|
||||
data = json.loads(json_match.group())
|
||||
return response_format.model_validate(data)
|
||||
return response_format.model_validate_json(json_match.group())
|
||||
|
||||
raise ValueError("No JSON found in response")
|
||||
|
||||
@@ -737,25 +735,22 @@ class BaseLLM(ABC):
|
||||
task=None,
|
||||
crew=None,
|
||||
)
|
||||
verbose = getattr(from_agent, "verbose", True) if from_agent else True
|
||||
printer = Printer()
|
||||
|
||||
try:
|
||||
for hook in before_hooks:
|
||||
result = hook(hook_context)
|
||||
if result is False:
|
||||
if verbose:
|
||||
printer.print(
|
||||
content="LLM call blocked by before_llm_call hook",
|
||||
color="yellow",
|
||||
)
|
||||
printer.print(
|
||||
content="LLM call blocked by before_llm_call hook",
|
||||
color="yellow",
|
||||
)
|
||||
return False
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
printer.print(
|
||||
content=f"Error in before_llm_call hook: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
printer.print(
|
||||
content=f"Error in before_llm_call hook: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
return True
|
||||
|
||||
@@ -808,7 +803,6 @@ class BaseLLM(ABC):
|
||||
crew=None,
|
||||
response=response,
|
||||
)
|
||||
verbose = getattr(from_agent, "verbose", True) if from_agent else True
|
||||
printer = Printer()
|
||||
modified_response = response
|
||||
|
||||
@@ -819,10 +813,9 @@ class BaseLLM(ABC):
|
||||
modified_response = result
|
||||
hook_context.response = modified_response
|
||||
except Exception as e:
|
||||
if verbose:
|
||||
printer.print(
|
||||
content=f"Error in after_llm_call hook: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
printer.print(
|
||||
content=f"Error in after_llm_call hook: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
return modified_response
|
||||
|
||||
@@ -23,7 +23,7 @@ if TYPE_CHECKING:
|
||||
try:
|
||||
from anthropic import Anthropic, AsyncAnthropic, transform_schema
|
||||
from anthropic.types import Message, TextBlock, ThinkingBlock, ToolUseBlock
|
||||
from anthropic.types.beta import BetaMessage, BetaTextBlock
|
||||
from anthropic.types.beta import BetaMessage
|
||||
import httpx
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
@@ -337,7 +337,6 @@ class AnthropicCompletion(BaseLLM):
|
||||
available_functions: Available functions for tool calling
|
||||
from_task: Task that initiated the call
|
||||
from_agent: Agent that initiated the call
|
||||
response_model: Optional response model.
|
||||
|
||||
Returns:
|
||||
Chat completion response or tool call result
|
||||
@@ -678,31 +677,31 @@ class AnthropicCompletion(BaseLLM):
|
||||
if _is_pydantic_model_class(response_model) and response.content:
|
||||
if use_native_structured_output:
|
||||
for block in response.content:
|
||||
if isinstance(block, (TextBlock, BetaTextBlock)):
|
||||
structured_data = response_model.model_validate_json(block.text)
|
||||
if isinstance(block, TextBlock):
|
||||
structured_json = block.text
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return structured_json
|
||||
else:
|
||||
for block in response.content:
|
||||
if (
|
||||
isinstance(block, ToolUseBlock)
|
||||
and block.name == "structured_output"
|
||||
):
|
||||
structured_data = response_model.model_validate(block.input)
|
||||
structured_json = json.dumps(block.input)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return structured_json
|
||||
|
||||
# Check if Claude wants to use tools
|
||||
if response.content:
|
||||
@@ -898,29 +897,28 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
if _is_pydantic_model_class(response_model):
|
||||
if use_native_structured_output:
|
||||
structured_data = response_model.model_validate_json(full_response)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=full_response,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return full_response
|
||||
for block in final_message.content:
|
||||
if (
|
||||
isinstance(block, ToolUseBlock)
|
||||
and block.name == "structured_output"
|
||||
):
|
||||
structured_data = response_model.model_validate(block.input)
|
||||
structured_json = json.dumps(block.input)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return structured_json
|
||||
|
||||
if final_message.content:
|
||||
tool_uses = [
|
||||
@@ -1168,31 +1166,31 @@ class AnthropicCompletion(BaseLLM):
|
||||
if _is_pydantic_model_class(response_model) and response.content:
|
||||
if use_native_structured_output:
|
||||
for block in response.content:
|
||||
if isinstance(block, (TextBlock, BetaTextBlock)):
|
||||
structured_data = response_model.model_validate_json(block.text)
|
||||
if isinstance(block, TextBlock):
|
||||
structured_json = block.text
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return structured_json
|
||||
else:
|
||||
for block in response.content:
|
||||
if (
|
||||
isinstance(block, ToolUseBlock)
|
||||
and block.name == "structured_output"
|
||||
):
|
||||
structured_data = response_model.model_validate(block.input)
|
||||
structured_json = json.dumps(block.input)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return structured_json
|
||||
|
||||
if response.content:
|
||||
tool_uses = [
|
||||
@@ -1364,29 +1362,28 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
if _is_pydantic_model_class(response_model):
|
||||
if use_native_structured_output:
|
||||
structured_data = response_model.model_validate_json(full_response)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=full_response,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return full_response
|
||||
for block in final_message.content:
|
||||
if (
|
||||
isinstance(block, ToolUseBlock)
|
||||
and block.name == "structured_output"
|
||||
):
|
||||
structured_data = response_model.model_validate(block.input)
|
||||
structured_json = json.dumps(block.input)
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return structured_data
|
||||
return structured_json
|
||||
|
||||
if final_message.content:
|
||||
tool_uses = [
|
||||
|
||||
@@ -557,7 +557,7 @@ class AzureCompletion(BaseLLM):
        params: AzureCompletionParams,
        from_task: Any | None = None,
        from_agent: Any | None = None,
    ) -> BaseModel:
    ) -> str:
        """Validate content against response model and emit completion event.

        Args:
@@ -568,23 +568,24 @@ class AzureCompletion(BaseLLM):
            from_agent: Agent that initiated the call

        Returns:
            Validated Pydantic model instance
            Validated and serialized JSON string

        Raises:
            ValueError: If validation fails
        """
        try:
            structured_data = response_model.model_validate_json(content)
            structured_json = structured_data.model_dump_json()

            self._emit_call_completed_event(
                response=structured_data.model_dump_json(),
                response=structured_json,
                call_type=LLMCallType.LLM_CALL,
                from_task=from_task,
                from_agent=from_agent,
                messages=params["messages"],
            )

            return structured_data
            return structured_json
        except Exception as e:
            error_msg = f"Failed to validate structured output with model {response_model.__name__}: {e}"
            logging.error(error_msg)

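The two sides of this hunk differ in what the helper hands back: the validated Pydantic instance itself, or that instance serialized back into a JSON string via `model_dump_json()` before the completion event is emitted. A minimal sketch of the round trip, with a hypothetical response model standing in for the caller-supplied `response_model`:

    from pydantic import BaseModel


    class Answer(BaseModel):
        title: str
        score: float


    raw = '{"title": "ok", "score": 0.9}'                  # e.g. text returned by the provider
    structured_data = Answer.model_validate_json(raw)       # parse + validate in one step
    structured_json = structured_data.model_dump_json()     # canonical JSON string for events/callers
    print(structured_json)
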
@@ -1,6 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Mapping, Sequence
|
||||
from collections.abc import Sequence
|
||||
from contextlib import AsyncExitStack
|
||||
import json
|
||||
import logging
|
||||
@@ -538,7 +538,7 @@ class BedrockCompletion(BaseLLM):
|
||||
self,
|
||||
messages: list[LLMMessage],
|
||||
body: BedrockConverseRequestBody,
|
||||
available_functions: Mapping[str, Any] | None = None,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
@@ -1009,7 +1009,7 @@ class BedrockCompletion(BaseLLM):
|
||||
self,
|
||||
messages: list[LLMMessage],
|
||||
body: BedrockConverseRequestBody,
|
||||
available_functions: Mapping[str, Any] | None = None,
|
||||
available_functions: dict[str, Any] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
|
||||
@@ -132,9 +132,6 @@ class GeminiCompletion(BaseLLM):
|
||||
self.supports_tools = bool(
|
||||
version_match and float(version_match.group(1)) >= 1.5
|
||||
)
|
||||
self.is_gemini_2_0 = bool(
|
||||
version_match and float(version_match.group(1)) >= 2.0
|
||||
)
|
||||
|
||||
@property
|
||||
def stop(self) -> list[str]:
|
||||
@@ -442,11 +439,6 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
Returns:
|
||||
GenerateContentConfig object for Gemini API
|
||||
|
||||
Note:
|
||||
Structured output support varies by model version:
|
||||
- Gemini 1.5 and earlier: Uses response_schema (Pydantic model)
|
||||
- Gemini 2.0+: Uses response_json_schema (JSON Schema) with propertyOrdering
|
||||
"""
|
||||
self.tools = tools
|
||||
config_params: dict[str, Any] = {}
|
||||
@@ -474,13 +466,9 @@ class GeminiCompletion(BaseLLM):
|
||||
if response_model:
|
||||
config_params["response_mime_type"] = "application/json"
|
||||
schema_output = generate_model_description(response_model)
|
||||
schema = schema_output.get("json_schema", {}).get("schema", {})
|
||||
|
||||
if self.is_gemini_2_0:
|
||||
schema = self._add_property_ordering(schema)
|
||||
config_params["response_json_schema"] = schema
|
||||
else:
|
||||
config_params["response_schema"] = response_model
|
||||
config_params["response_schema"] = schema_output.get("json_schema", {}).get(
|
||||
"schema", {}
|
||||
)
|
||||
|
||||
# Handle tools for supported models
|
||||
if tools and self.supports_tools:
|
||||
@@ -644,7 +632,7 @@ class GeminiCompletion(BaseLLM):
|
||||
messages_for_event: list[LLMMessage],
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
) -> BaseModel:
|
||||
) -> str:
|
||||
"""Validate content against response model and emit completion event.
|
||||
|
||||
Args:
|
||||
@@ -655,23 +643,24 @@ class GeminiCompletion(BaseLLM):
|
||||
from_agent: Agent that initiated the call
|
||||
|
||||
Returns:
|
||||
Validated Pydantic model instance
|
||||
Validated and serialized JSON string
|
||||
|
||||
Raises:
|
||||
ValueError: If validation fails
|
||||
"""
|
||||
try:
|
||||
structured_data = response_model.model_validate_json(content)
|
||||
structured_json = structured_data.model_dump_json()
|
||||
|
||||
self._emit_call_completed_event(
|
||||
response=structured_data.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=messages_for_event,
|
||||
)
|
||||
|
||||
return structured_data
|
||||
return structured_json
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to validate structured output with model {response_model.__name__}: {e}"
|
||||
logging.error(error_msg)
|
||||
@@ -684,7 +673,7 @@ class GeminiCompletion(BaseLLM):
|
||||
response_model: type[BaseModel] | None = None,
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
) -> str | BaseModel:
|
||||
) -> str:
|
||||
"""Finalize completion response with validation and event emission.
|
||||
|
||||
Args:
|
||||
@@ -695,7 +684,7 @@ class GeminiCompletion(BaseLLM):
|
||||
from_agent: Agent that initiated the call
|
||||
|
||||
Returns:
|
||||
Final response content after processing (str or Pydantic model if response_model provided)
|
||||
Final response content after processing
|
||||
"""
|
||||
messages_for_event = self._convert_contents_to_dict(contents)
|
||||
|
||||
@@ -881,7 +870,7 @@ class GeminiCompletion(BaseLLM):
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | BaseModel | list[dict[str, Any]]:
|
||||
) -> str | list[dict[str, Any]]:
|
||||
"""Finalize streaming response with usage tracking, function execution, and events.
|
||||
|
||||
Args:
|
||||
@@ -1001,7 +990,7 @@ class GeminiCompletion(BaseLLM):
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | BaseModel | list[dict[str, Any]] | Any:
|
||||
) -> str | Any:
|
||||
"""Handle streaming content generation."""
|
||||
full_response = ""
|
||||
function_calls: dict[int, dict[str, Any]] = {}
|
||||
@@ -1201,36 +1190,6 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
return "".join(text_parts)
|
||||
|
||||
@staticmethod
|
||||
def _add_property_ordering(schema: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Add propertyOrdering to JSON schema for Gemini 2.0 compatibility.
|
||||
|
||||
Gemini 2.0 models require an explicit propertyOrdering list to define
|
||||
the preferred structure of JSON objects. This recursively adds
|
||||
propertyOrdering to all objects in the schema.
|
||||
|
||||
Args:
|
||||
schema: JSON schema dictionary.
|
||||
|
||||
Returns:
|
||||
Modified schema with propertyOrdering added to all objects.
|
||||
"""
|
||||
if isinstance(schema, dict):
|
||||
if schema.get("type") == "object" and "properties" in schema:
|
||||
properties = schema["properties"]
|
||||
if properties and "propertyOrdering" not in schema:
|
||||
schema["propertyOrdering"] = list(properties.keys())
|
||||
|
||||
for value in schema.values():
|
||||
if isinstance(value, dict):
|
||||
GeminiCompletion._add_property_ordering(value)
|
||||
elif isinstance(value, list):
|
||||
for item in value:
|
||||
if isinstance(item, dict):
|
||||
GeminiCompletion._add_property_ordering(item)
|
||||
|
||||
return schema
|
||||
|
||||
@staticmethod
|
||||
def _convert_contents_to_dict(
|
||||
contents: list[types.Content],
|
||||
|
||||
@@ -1570,14 +1570,15 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
parsed_object = parsed_response.choices[0].message.parsed
|
||||
if parsed_object:
|
||||
structured_json = parsed_object.model_dump_json()
|
||||
self._emit_call_completed_event(
|
||||
response=parsed_object.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return parsed_object
|
||||
return structured_json
|
||||
|
||||
response: ChatCompletion = self.client.chat.completions.create(**params)
|
||||
|
||||
@@ -1691,7 +1692,7 @@ class OpenAICompletion(BaseLLM):
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | BaseModel:
|
||||
) -> str:
|
||||
"""Handle streaming chat completion."""
|
||||
full_response = ""
|
||||
tool_calls: dict[int, dict[str, Any]] = {}
|
||||
@@ -1727,14 +1728,15 @@ class OpenAICompletion(BaseLLM):
|
||||
if final_completion.choices:
|
||||
parsed_result = final_completion.choices[0].message.parsed
|
||||
if parsed_result:
|
||||
structured_json = parsed_result.model_dump_json()
|
||||
self._emit_call_completed_event(
|
||||
response=parsed_result.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return parsed_result
|
||||
return structured_json
|
||||
|
||||
logging.error("Failed to get parsed result from stream")
|
||||
return ""
|
||||
@@ -1885,14 +1887,15 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
parsed_object = parsed_response.choices[0].message.parsed
|
||||
if parsed_object:
|
||||
structured_json = parsed_object.model_dump_json()
|
||||
self._emit_call_completed_event(
|
||||
response=parsed_object.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
return parsed_object
|
||||
return structured_json
|
||||
|
||||
response: ChatCompletion = await self.async_client.chat.completions.create(
|
||||
**params
|
||||
@@ -2003,7 +2006,7 @@ class OpenAICompletion(BaseLLM):
|
||||
from_task: Any | None = None,
|
||||
from_agent: Any | None = None,
|
||||
response_model: type[BaseModel] | None = None,
|
||||
) -> str | BaseModel:
|
||||
) -> str:
|
||||
"""Handle async streaming chat completion."""
|
||||
full_response = ""
|
||||
tool_calls: dict[int, dict[str, Any]] = {}
|
||||
@@ -2041,16 +2044,17 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
try:
|
||||
parsed_object = response_model.model_validate_json(accumulated_content)
|
||||
structured_json = parsed_object.model_dump_json()
|
||||
|
||||
self._emit_call_completed_event(
|
||||
response=parsed_object.model_dump_json(),
|
||||
response=structured_json,
|
||||
call_type=LLMCallType.LLM_CALL,
|
||||
from_task=from_task,
|
||||
from_agent=from_agent,
|
||||
messages=params["messages"],
|
||||
)
|
||||
|
||||
return parsed_object
|
||||
return structured_json
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to parse structured output from stream: {e}")
|
||||
self._emit_call_completed_event(
|
||||
|
||||
@@ -12,17 +12,15 @@ from crewai.utilities.paths import db_storage_path
|
||||
class LTMSQLiteStorage:
|
||||
"""SQLite storage class for long-term memory data."""
|
||||
|
||||
def __init__(self, db_path: str | None = None, verbose: bool = True) -> None:
|
||||
def __init__(self, db_path: str | None = None) -> None:
|
||||
"""Initialize the SQLite storage.
|
||||
|
||||
Args:
|
||||
db_path: Optional path to the database file.
|
||||
verbose: Whether to print error messages.
|
||||
"""
|
||||
if db_path is None:
|
||||
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
|
||||
self.db_path = db_path
|
||||
self._verbose = verbose
|
||||
self._printer: Printer = Printer()
|
||||
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
|
||||
self._initialize_db()
|
||||
@@ -46,11 +44,10 @@ class LTMSQLiteStorage:
|
||||
|
||||
conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def save(
|
||||
self,
|
||||
@@ -72,11 +69,10 @@ class LTMSQLiteStorage:
|
||||
)
|
||||
conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def load(self, task_description: str, latest_n: int) -> list[dict[str, Any]] | None:
|
||||
"""Queries the LTM table by task description with error handling."""
|
||||
@@ -105,11 +101,10 @@ class LTMSQLiteStorage:
|
||||
]
|
||||
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
return None
|
||||
|
||||
def reset(self) -> None:
|
||||
@@ -121,11 +116,10 @@ class LTMSQLiteStorage:
|
||||
conn.commit()
|
||||
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
async def asave(
|
||||
self,
|
||||
@@ -153,11 +147,10 @@ class LTMSQLiteStorage:
|
||||
)
|
||||
await conn.commit()
|
||||
except aiosqlite.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
async def aload(
|
||||
self, task_description: str, latest_n: int
|
||||
@@ -194,11 +187,10 @@ class LTMSQLiteStorage:
|
||||
for row in rows
|
||||
]
|
||||
except aiosqlite.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
return None
|
||||
|
||||
async def areset(self) -> None:
|
||||
@@ -208,8 +200,7 @@ class LTMSQLiteStorage:
|
||||
await conn.execute("DELETE FROM long_term_memories")
|
||||
await conn.commit()
|
||||
except aiosqlite.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
@@ -1,6 +1,5 @@
"""Type definitions specific to ChromaDB implementation."""

from collections.abc import Mapping
from typing import Any, NamedTuple

from chromadb.api import AsyncClientAPI, ClientAPI
@@ -49,7 +48,7 @@ class PreparedDocuments(NamedTuple):

    ids: list[str]
    texts: list[str]
    metadatas: list[Mapping[str, str | int | float | bool]]
    metadatas: list[dict[str, str | int | float | bool]]


class ExtractedSearchParams(NamedTuple):

@@ -1,6 +1,5 @@
"""Utility functions for ChromaDB client implementation."""

from collections.abc import Mapping
import hashlib
import json
from typing import Literal, TypeGuard, cast
@@ -66,7 +65,7 @@ def _prepare_documents_for_chromadb(
    """
    ids: list[str] = []
    texts: list[str] = []
    metadatas: list[Mapping[str, str | int | float | bool]] = []
    metadatas: list[dict[str, str | int | float | bool]] = []
    seen_ids: dict[str, int] = {}

    try:
@@ -111,7 +110,7 @@ def _prepare_documents_for_chromadb(

def _create_batch_slice(
    prepared: PreparedDocuments, start_index: int, batch_size: int
) -> tuple[list[str], list[str], list[Mapping[str, str | int | float | bool]] | None]:
) -> tuple[list[str], list[str], list[dict[str, str | int | float | bool]] | None]:
    """Create a batch slice from prepared documents.

    Args:

@@ -1,6 +1,6 @@
|
||||
"""IBM WatsonX embedding function implementation."""
|
||||
|
||||
from typing import Any, cast
|
||||
from typing import cast
|
||||
|
||||
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
|
||||
from typing_extensions import Unpack
|
||||
@@ -15,18 +15,14 @@ _printer = Printer()
|
||||
class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
|
||||
"""Embedding function for IBM WatsonX models."""
|
||||
|
||||
def __init__(
|
||||
self, *, verbose: bool = True, **kwargs: Unpack[WatsonXProviderConfig]
|
||||
) -> None:
|
||||
def __init__(self, **kwargs: Unpack[WatsonXProviderConfig]) -> None:
|
||||
"""Initialize WatsonX embedding function.
|
||||
|
||||
Args:
|
||||
verbose: Whether to print error messages.
|
||||
**kwargs: Configuration parameters for WatsonX Embeddings and Credentials.
|
||||
"""
|
||||
super().__init__(**kwargs)
|
||||
self._config = kwargs
|
||||
self._verbose = verbose
|
||||
|
||||
@staticmethod
|
||||
def name() -> str:
|
||||
@@ -60,7 +56,7 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
|
||||
if isinstance(input, str):
|
||||
input = [input]
|
||||
|
||||
embeddings_config: dict[str, Any] = {
|
||||
embeddings_config: dict = {
|
||||
"model_id": self._config["model_id"],
|
||||
}
|
||||
if "params" in self._config and self._config["params"] is not None:
|
||||
@@ -94,7 +90,7 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
|
||||
if "credentials" in self._config and self._config["credentials"] is not None:
|
||||
embeddings_config["credentials"] = self._config["credentials"]
|
||||
else:
|
||||
cred_config: dict[str, Any] = {}
|
||||
cred_config: dict = {}
|
||||
if "url" in self._config and self._config["url"] is not None:
|
||||
cred_config["url"] = self._config["url"]
|
||||
if "api_key" in self._config and self._config["api_key"] is not None:
|
||||
@@ -163,6 +159,5 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
|
||||
embeddings = embedding.embed_documents(input)
|
||||
return cast(Embeddings, embeddings)
|
||||
except Exception as e:
|
||||
if self._verbose:
|
||||
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
|
||||
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
|
||||
raise
|
||||
|
||||
@@ -1,6 +1,8 @@
"""Type definitions for the embeddings module."""

from typing import Any, Literal, TypeAlias
from typing import Annotated, Any, Literal, TypeAlias

from pydantic import Field

from crewai.rag.core.base_embeddings_provider import BaseEmbeddingsProvider
from crewai.rag.embeddings.providers.aws.types import BedrockProviderSpec
@@ -29,7 +31,7 @@ from crewai.rag.embeddings.providers.text2vec.types import Text2VecProviderSpec
from crewai.rag.embeddings.providers.voyageai.types import VoyageAIProviderSpec


ProviderSpec: TypeAlias = (
ProviderSpec: TypeAlias = Annotated[
    AzureProviderSpec
    | BedrockProviderSpec
    | CohereProviderSpec
@@ -47,8 +49,9 @@ ProviderSpec: TypeAlias = (
    | Text2VecProviderSpec
    | VertexAIProviderSpec
    | VoyageAIProviderSpec
    | WatsonXProviderSpec
)
    | WatsonXProviderSpec,
    Field(discriminator="provider"),
]

AllowedEmbeddingProviders = Literal[
    "azure",

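One side of this hunk wraps the provider union in `Annotated[..., Field(discriminator="provider")]`, turning it into a Pydantic discriminated union: the `provider` key alone selects which variant to validate against, and validation errors stay scoped to that one variant. A small sketch of the mechanism with hypothetical provider specs (the real `*ProviderSpec` types come from the imports above):

    from typing import Annotated, Literal

    from pydantic import BaseModel, Field, TypeAdapter


    class OpenAISpec(BaseModel):
        provider: Literal["openai"]
        model: str


    class OllamaSpec(BaseModel):
        provider: Literal["ollama"]
        host: str


    ProviderSpec = Annotated[OpenAISpec | OllamaSpec, Field(discriminator="provider")]

    spec = TypeAdapter(ProviderSpec).validate_python(
        {"provider": "ollama", "host": "http://localhost:11434"}
    )
    print(type(spec).__name__)  # OllamaSpec
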
@@ -1,6 +1,6 @@
"""Type definitions for RAG (Retrieval-Augmented Generation) systems."""

from collections.abc import Callable, Mapping
from collections.abc import Callable
from typing import Any, TypeAlias

from typing_extensions import Required, TypedDict
@@ -19,8 +19,8 @@ class BaseRecord(TypedDict, total=False):
    doc_id: str
    content: Required[str]
    metadata: (
        Mapping[str, str | int | float | bool]
        | list[Mapping[str, str | int | float | bool]]
        dict[str, str | int | float | bool]
        | list[dict[str, str | int | float | bool]]
    )


@@ -767,11 +767,10 @@ class Task(BaseModel):
|
||||
if files:
|
||||
supported_types: list[str] = []
|
||||
if self.agent.llm and self.agent.llm.supports_multimodal():
|
||||
provider: str = str(
|
||||
getattr(self.agent.llm, "provider", None)
|
||||
or getattr(self.agent.llm, "model", "openai")
|
||||
provider = getattr(self.agent.llm, "provider", None) or getattr(
|
||||
self.agent.llm, "model", "openai"
|
||||
)
|
||||
api: str | None = getattr(self.agent.llm, "api", None)
|
||||
api = getattr(self.agent.llm, "api", None)
|
||||
supported_types = get_supported_content_types(provider, api)
|
||||
|
||||
def is_auto_injected(content_type: str) -> bool:
|
||||
@@ -888,11 +887,10 @@ Follow these guidelines:
|
||||
try:
|
||||
crew_chat_messages = json.loads(crew_chat_messages_json)
|
||||
except json.JSONDecodeError as e:
|
||||
if self.agent and self.agent.verbose:
|
||||
_printer.print(
|
||||
f"An error occurred while parsing crew chat messages: {e}",
|
||||
color="red",
|
||||
)
|
||||
_printer.print(
|
||||
f"An error occurred while parsing crew chat messages: {e}",
|
||||
color="red",
|
||||
)
|
||||
raise
|
||||
|
||||
conversation_history = "\n".join(
|
||||
@@ -1134,12 +1132,11 @@ Follow these guidelines:
|
||||
guardrail_result_error=guardrail_result.error,
|
||||
task_output=task_output.raw,
|
||||
)
|
||||
if agent and agent.verbose:
|
||||
printer = Printer()
|
||||
printer.print(
|
||||
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
|
||||
color="yellow",
|
||||
)
|
||||
printer = Printer()
|
||||
printer.print(
|
||||
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
# Regenerate output from agent
|
||||
result = agent.execute_task(
|
||||
@@ -1232,12 +1229,11 @@ Follow these guidelines:
|
||||
guardrail_result_error=guardrail_result.error,
|
||||
task_output=task_output.raw,
|
||||
)
|
||||
if agent and agent.verbose:
|
||||
printer = Printer()
|
||||
printer.print(
|
||||
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
|
||||
color="yellow",
|
||||
)
|
||||
printer = Printer()
|
||||
printer.print(
|
||||
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
result = await agent.aexecute_task(
|
||||
task=self,
|
||||
|
||||
@@ -200,9 +200,12 @@ class CrewStructuredTool:
        """
        if isinstance(raw_args, str):
            try:
                raw_args = json.loads(raw_args)
                validated_args = self.args_schema.model_validate_json(raw_args)
                return validated_args.model_dump()
            except json.JSONDecodeError as e:
                raise ValueError(f"Failed to parse arguments as JSON: {e}") from e
            except Exception as e:
                raise ValueError(f"Arguments validation failed: {e}") from e

        try:
            validated_args = self.args_schema.model_validate(raw_args)

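The two sides of this hunk differ in how a JSON argument string becomes tool kwargs: validating the raw string directly with `model_validate_json` and dumping the result to a dict, versus `json.loads` followed by `model_validate`. Both paths are sketched below with a hypothetical args schema:

    import json

    from pydantic import BaseModel


    class SearchArgs(BaseModel):
        query: str
        limit: int = 10


    raw_args = '{"query": "crewai", "limit": 3}'

    # One-step path: parse and validate together, then dump to a plain dict.
    validated = SearchArgs.model_validate_json(raw_args)
    print(validated.model_dump())  # {'query': 'crewai', 'limit': 3}

    # Equivalent two-step path: json.loads first, then validate the dict.
    print(SearchArgs.model_validate(json.loads(raw_args)).model_dump())
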
@@ -384,8 +384,6 @@ class ToolUsage:
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
and self.agent
|
||||
and self.agent.verbose
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
@@ -398,8 +396,6 @@ class ToolUsage:
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
and self.agent
|
||||
and self.agent.verbose
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
@@ -614,8 +610,6 @@ class ToolUsage:
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
and self.agent
|
||||
and self.agent.verbose
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
@@ -628,8 +622,6 @@ class ToolUsage:
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
and self.agent
|
||||
and self.agent.verbose
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
@@ -892,17 +884,15 @@ class ToolUsage:
|
||||
# Attempt 4: Repair JSON
|
||||
try:
|
||||
repaired_input = str(repair_json(tool_input, skip_json_loads=True))
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"Repaired JSON: {repaired_input}", color="blue"
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"Repaired JSON: {repaired_input}", color="blue"
|
||||
)
|
||||
arguments = json.loads(repaired_input)
|
||||
if isinstance(arguments, dict):
|
||||
return arguments
|
||||
except Exception as e:
|
||||
error = f"Failed to repair JSON: {e}"
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(content=error, color="red")
|
||||
self._printer.print(content=error, color="red")
|
||||
|
||||
error_message = (
|
||||
"Tool input must be a valid dictionary in JSON or Python literal format"
|
||||
|
||||
@@ -10,10 +10,9 @@
|
||||
"memory": "\n\n# Useful context: \n{memory}",
|
||||
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
|
||||
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
|
||||
"no_tools": "",
|
||||
"task_no_tools": "\nCurrent Task: {input}\n\nProvide your complete response:",
|
||||
"native_tools": "",
|
||||
"native_task": "\nCurrent Task: {input}",
|
||||
"no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
|
||||
"native_tools": "\nUse available tools to gather information and complete your task.",
|
||||
"native_task": "\nCurrent Task: {input}\n\nThis is VERY important to you, your job depends on it!",
|
||||
"post_tool_reasoning": "Analyze the tool result. If requirements are met, provide the Final Answer. Otherwise, call the next tool. Deliver only the answer without meta-commentary.",
|
||||
"format": "Decide if you need a tool or can provide the final answer. Use one at a time.\nTo use a tool, use:\nThought: [reasoning]\nAction: [name from {tool_names}]\nAction Input: [JSON object]\n\nTo provide the final answer, use:\nThought: [reasoning]\nFinal Answer: [complete response]",
|
||||
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
|
||||
|
||||
@@ -1,7 +1,7 @@
from __future__ import annotations

import asyncio
from collections.abc import Callable, Sequence
from collections.abc import Callable
import json
import re
from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
@@ -98,7 +98,7 @@ def parse_tools(tools: list[BaseTool]) -> list[CrewStructuredTool]:
|
||||
return tools_list
|
||||
|
||||
|
||||
def get_tool_names(tools: Sequence[CrewStructuredTool | BaseTool]) -> str:
|
||||
def get_tool_names(tools: list[CrewStructuredTool | BaseTool]) -> str:
|
||||
"""Get the sanitized names of the tools.
|
||||
|
||||
Args:
|
||||
@@ -111,7 +111,7 @@ def get_tool_names(tools: Sequence[CrewStructuredTool | BaseTool]) -> str:
|
||||
|
||||
|
||||
def render_text_description_and_args(
|
||||
tools: Sequence[CrewStructuredTool | BaseTool],
|
||||
tools: list[CrewStructuredTool | BaseTool],
|
||||
) -> str:
|
||||
"""Render the tool name, description, and args in plain text.
|
||||
|
||||
@@ -130,7 +130,7 @@ def render_text_description_and_args(


def convert_tools_to_openai_schema(
tools: Sequence[BaseTool | CrewStructuredTool],
tools: list[BaseTool | CrewStructuredTool],
) -> tuple[list[dict[str, Any]], dict[str, Callable[..., Any]]]:
"""Convert CrewAI tools to OpenAI function calling format.

@@ -210,7 +210,6 @@ def handle_max_iterations_exceeded(
messages: list[LLMMessage],
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
verbose: bool = True,
) -> AgentFinish:
"""Handles the case when the maximum number of iterations is exceeded. Performs one more LLM call to get the final answer.

@@ -221,16 +220,14 @@ def handle_max_iterations_exceeded(
messages: List of messages to send to the LLM.
llm: The LLM instance to call.
callbacks: List of callbacks for the LLM call.
verbose: Whether to print output.

Returns:
AgentFinish with the final answer after exceeding max iterations.
"""
if verbose:
printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)
printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)

if formatted_answer and hasattr(formatted_answer, "text"):
assistant_message = (
@@ -248,11 +245,10 @@ def handle_max_iterations_exceeded(
)

if answer is None or answer == "":
if verbose:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")

formatted = format_answer(answer=answer)
@@ -326,8 +322,7 @@ def get_llm_response(
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None,
verbose: bool = True,
) -> str | BaseModel | Any:
) -> str | Any:
"""Call the LLM and return the response, handling any invalid responses.

Args:
@@ -341,11 +336,10 @@ def get_llm_response(
from_agent: Optional agent context for the LLM call.
response_model: Optional Pydantic model for structured outputs.
executor_context: Optional executor context for hook invocation.
verbose: Whether to print output.

Returns:
The response from the LLM as a string, Pydantic model (when response_model is provided),
or tool call results if native function calling is used.
The response from the LLM as a string, or tool call results if
native function calling is used.

Raises:
Exception: If an error occurs.
@@ -353,7 +347,7 @@ def get_llm_response(
"""

if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer, verbose=verbose):
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
messages = executor_context.messages

@@ -370,16 +364,13 @@ def get_llm_response(
except Exception as e:
raise e
if not answer:
if verbose:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")

return _setup_after_llm_call_hooks(
executor_context, answer, printer, verbose=verbose
)
return _setup_after_llm_call_hooks(executor_context, answer, printer)


async def aget_llm_response(
@@ -393,8 +384,7 @@ async def aget_llm_response(
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | AgentExecutor | None = None,
verbose: bool = True,
) -> str | BaseModel | Any:
) -> str | Any:
"""Call the LLM asynchronously and return the response.

Args:
@@ -410,15 +400,15 @@ async def aget_llm_response(
executor_context: Optional executor context for hook invocation.

Returns:
The response from the LLM as a string, Pydantic model (when response_model is provided),
or tool call results if native function calling is used.
The response from the LLM as a string, or tool call results if
native function calling is used.

Raises:
Exception: If an error occurs.
ValueError: If the response is None or empty.
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer, verbose=verbose):
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
messages = executor_context.messages

@@ -435,16 +425,13 @@ async def aget_llm_response(
except Exception as e:
raise e
if not answer:
if verbose:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")

return _setup_after_llm_call_hooks(
executor_context, answer, printer, verbose=verbose
)
return _setup_after_llm_call_hooks(executor_context, answer, printer)


def process_llm_response(
@@ -511,19 +498,13 @@ def handle_agent_action_core(
return formatted_answer


def handle_unknown_error(
printer: Printer, exception: Exception, verbose: bool = True
) -> None:
def handle_unknown_error(printer: Printer, exception: Exception) -> None:
"""Handle unknown errors by informing the user.

Args:
printer: Printer instance for output
exception: The exception that occurred
verbose: Whether to print output.
"""
if not verbose:
return

error_message = str(exception)

if "litellm" in error_message:
@@ -545,7 +526,6 @@ def handle_output_parser_exception(
iterations: int,
log_error_after: int = 3,
printer: Printer | None = None,
verbose: bool = True,
) -> AgentAction:
"""Handle OutputParserError by updating messages and formatted_answer.

@@ -568,7 +548,7 @@ def handle_output_parser_exception(
thought="",
)

if verbose and iterations > log_error_after and printer:
if iterations > log_error_after and printer:
printer.print(
content=f"Error parsing LLM output, agent will retry: {e.error}",
color="red",
@@ -598,7 +578,6 @@ def handle_context_length(
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
i18n: I18N,
verbose: bool = True,
) -> None:
"""Handle context length exceeded by either summarizing or raising an error.

@@ -614,20 +593,16 @@ def handle_context_length(
SystemExit: If context length is exceeded and user opts not to summarize
"""
if respect_context_window:
if verbose:
printer.print(
content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
color="yellow",
)
summarize_messages(
messages=messages, llm=llm, callbacks=callbacks, i18n=i18n, verbose=verbose
printer.print(
content="Context length exceeded. Summarizing content to fit the model context window. Might take a while...",
color="yellow",
)
summarize_messages(messages=messages, llm=llm, callbacks=callbacks, i18n=i18n)
else:
if verbose:
printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
@@ -638,7 +613,6 @@ def summarize_messages(
llm: LLM | BaseLLM,
callbacks: list[TokenCalcHandler],
i18n: I18N,
verbose: bool = True,
) -> None:
"""Summarize messages to fit within context window.

@@ -670,11 +644,10 @@ def summarize_messages(

total_groups = len(messages_groups)
for idx, group in enumerate(messages_groups, 1):
if verbose:
Printer().print(
content=f"Summarizing {idx}/{total_groups}...",
color="yellow",
)
Printer().print(
content=f"Summarizing {idx}/{total_groups}...",
color="yellow",
)

summarization_messages = [
format_message_for_llm(
@@ -932,14 +905,12 @@ def extract_tool_call_info(
def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
printer: Printer,
verbose: bool = True,
) -> bool:
"""Setup and invoke before_llm_call hooks for the executor context.

Args:
executor_context: The executor context to setup the hooks for.
printer: Printer instance for error logging.
verbose: Whether to print output.

Returns:
True if LLM execution should proceed, False if blocked by a hook.
@@ -954,29 +925,26 @@ def _setup_before_llm_call_hooks(
for hook in executor_context.before_llm_call_hooks:
result = hook(hook_context)
if result is False:
if verbose:
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
except Exception as e:
if verbose:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)

if not isinstance(executor_context.messages, list):
if verbose:
printer.print(
content=(
"Warning: before_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
printer.print(
content=(
"Warning: before_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if isinstance(original_messages, list):
executor_context.messages = original_messages
else:
@@ -987,79 +955,49 @@ def _setup_before_llm_call_hooks(

def _setup_after_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
answer: str | BaseModel,
answer: str,
printer: Printer,
verbose: bool = True,
) -> str | BaseModel:
) -> str:
"""Setup and invoke after_llm_call hooks for the executor context.

Args:
executor_context: The executor context to setup the hooks for.
answer: The LLM response (string or Pydantic model).
answer: The LLM response string.
printer: Printer instance for error logging.
verbose: Whether to print output.

Returns:
The potentially modified response (string or Pydantic model).
The potentially modified response string.
"""
if executor_context and executor_context.after_llm_call_hooks:
from crewai.hooks.llm_hooks import LLMCallHookContext

original_messages = executor_context.messages

# For Pydantic models, serialize to JSON for hooks
if isinstance(answer, BaseModel):
pydantic_answer = answer
hook_response: str = pydantic_answer.model_dump_json()
original_json: str = hook_response
else:
pydantic_answer = None
hook_response = str(answer)

hook_context = LLMCallHookContext(executor_context, response=hook_response)
hook_context = LLMCallHookContext(executor_context, response=answer)
try:
for hook in executor_context.after_llm_call_hooks:
modified_response = hook(hook_context)
if modified_response is not None and isinstance(modified_response, str):
hook_response = modified_response
answer = modified_response

except Exception as e:
if verbose:
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)

if not isinstance(executor_context.messages, list):
if verbose:
printer.print(
content=(
"Warning: after_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
printer.print(
content=(
"Warning: after_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if isinstance(original_messages, list):
executor_context.messages = original_messages
else:
executor_context.messages = []

# If hooks modified the response, update answer accordingly
if pydantic_answer is not None:
# For Pydantic models, reparse the JSON if it was modified
if hook_response != original_json:
try:
model_class: type[BaseModel] = type(pydantic_answer)
answer = model_class.model_validate_json(hook_response)
except Exception as e:
if verbose:
printer.print(
content=f"Warning: Hook modified response but failed to reparse as {type(pydantic_answer).__name__}: {e}. Using original model.",
color="yellow",
)
else:
# For string responses, use the hook-modified response
answer = hook_response

return answer
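For readers of the hook plumbing above, here is a minimal illustrative sketch of what a before/after LLM-call hook compatible with these utilities might look like. It is not part of this diff; the dict shape of the chat messages and the way hooks end up in before_llm_call_hooks / after_llm_call_hooks on the executor are assumptions.

from crewai.hooks.llm_hooks import LLMCallHookContext


def redact_keys_before_call(context: LLMCallHookContext) -> bool | None:
    # Example before_llm_call hook: edit context.messages in place, never replace the list.
    for message in context.messages:  # assumed dict-shaped chat messages
        content = str(message.get("content", ""))
        if "API_KEY" in content:
            message["content"] = content.replace("API_KEY", "[redacted]")
    return None  # returning False would block the LLM call


def tag_response_after_call(context: LLMCallHookContext) -> str:
    # Example after_llm_call hook: a returned string replaces the LLM response.
    return f"{context.response}\n\n[post-processed by after_llm_call hook]"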
@@ -62,10 +62,7 @@ class Converter(OutputConverter):
],
response_model=self.model,
)
if isinstance(response, BaseModel):
result = response
else:
result = self.model.model_validate_json(response)
result = self.model.model_validate_json(response)
else:
response = self.llm.call(
[
@@ -208,11 +205,10 @@ def convert_to_model(
)

except Exception as e:
if agent and getattr(agent, "verbose", True):
Printer().print(
content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
color="red",
)
Printer().print(
content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
color="red",
)
return result


@@ -266,11 +262,10 @@ def handle_partial_json(
except ValidationError:
raise
except Exception as e:
if agent and getattr(agent, "verbose", True):
Printer().print(
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
color="red",
)
Printer().print(
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
color="red",
)

return convert_with_instructions(
result=result,
@@ -328,11 +323,10 @@ def convert_with_instructions(
)

if isinstance(exported_result, ConverterError):
if agent and getattr(agent, "verbose", True):
Printer().print(
content=f"Failed to convert result to model: {exported_result}",
color="red",
)
Printer().print(
content=f"Failed to convert result to model: {exported_result}",
color="red",
)
return result

return exported_result
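As a quick aside on the model_validate_json path kept above, a self-contained sketch of the parsing step (the model and JSON string below are made up for illustration):

from pydantic import BaseModel


class Person(BaseModel):
    name: str
    age: int


raw = '{"name": "Ada", "age": 36}'  # stands in for the raw string returned by the LLM call
person = Person.model_validate_json(raw)
assert person.name == "Ada" and person.age == 36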
@@ -23,13 +23,7 @@ class SystemPromptResult(StandardPromptResult):


COMPONENTS = Literal[
"role_playing",
"tools",
"no_tools",
"native_tools",
"task",
"native_task",
"task_no_tools",
"role_playing", "tools", "no_tools", "native_tools", "task", "native_task"
]


@@ -80,14 +74,11 @@ class Prompts(BaseModel):
slices.append("no_tools")
system: str = self._build_prompt(slices)

# Determine which task slice to use:
task_slice: COMPONENTS
if self.use_native_tool_calling:
task_slice = "native_task"
elif self.has_tools:
task_slice = "task"
else:
task_slice = "task_no_tools"
# Use native_task for native tool calling (no "Thought:" prompt)
# Use task for ReAct pattern (includes "Thought:" prompt)
task_slice: COMPONENTS = (
"native_task" if self.use_native_tool_calling else "task"
)
slices.append(task_slice)

if (
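The hunk above swaps between a three-way and a two-way task-slice selection; as a reading aid, this standalone sketch (illustrative only, not library code) spells out the three-way variant:

def pick_task_slice(use_native_tool_calling: bool, has_tools: bool) -> str:
    if use_native_tool_calling:
        return "native_task"  # native tool calling: no "Thought:" scaffolding
    if has_tools:
        return "task"  # ReAct pattern, includes the "Thought:" prompt
    return "task_no_tools"  # plain prompt for agents without tools


assert pick_task_slice(True, True) == "native_task"
assert pick_task_slice(False, True) == "task"
assert pick_task_slice(False, False) == "task_no_tools"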
@@ -1004,53 +1004,3 @@ def test_prepare_kickoff_param_files_override_message_files():

assert "files" in inputs
assert inputs["files"]["same.png"] is param_file  # param takes precedence


def test_lite_agent_verbose_false_suppresses_printer_output():
"""Test that setting verbose=False suppresses all printer output."""
from crewai.agents.parser import AgentFinish
from crewai.types.usage_metrics import UsageMetrics

mock_llm = Mock(spec=LLM)
mock_llm.call.return_value = "Final Answer: Hello!"
mock_llm.stop = []
mock_llm.supports_stop_words.return_value = False
mock_llm.get_token_usage_summary.return_value = UsageMetrics(
total_tokens=100,
prompt_tokens=50,
completion_tokens=50,
cached_prompt_tokens=0,
successful_requests=1,
)

with pytest.warns(DeprecationWarning):
agent = LiteAgent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
verbose=False,
)

result = agent.kickoff("Say hello")

assert result is not None
assert isinstance(result, LiteAgentOutput)
# Verify the printer was never called
agent._printer.print = Mock()
# For a clean verification, patch printer before execution
with pytest.warns(DeprecationWarning):
agent2 = LiteAgent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
verbose=False,
)

mock_printer = Mock()
agent2._printer = mock_printer

agent2.kickoff("Say hello")

mock_printer.print.assert_not_called()
@@ -1,112 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Language Detector. You
|
||||
are an expert linguist who can identify languages.\nYour personal goal is: Detect
|
||||
the language of text"},{"role":"user","content":"\nCurrent Task: What language
|
||||
is this text written in: ''Hello, how are you?''\n\nThis is the expected criteria
|
||||
for your final answer: The detected language (e.g., English, Spanish, etc.)\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nProvide
|
||||
your complete response:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '530'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D39bkotgEapBcz1sSIXvhPhK9G7FD\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1769644288,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"English\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 101,\n \"completion_tokens\":
|
||||
1,\n \"total_tokens\": 102,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_3683ee3deb\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 28 Jan 2026 23:51:28 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '279'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,111 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Classifier. You classify
|
||||
text sentiment accurately.\nYour personal goal is: Classify text sentiment"},{"role":"user","content":"\nCurrent
|
||||
Task: Classify the sentiment of: ''I love this product!''\n\nThis is the expected
|
||||
criteria for your final answer: One word: positive, negative, or neutral\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nProvide
|
||||
your complete response:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '481'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"id\": \"chatcmpl-D39bkVPelOZanWIMBoIyzsuj072sM\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1769644288,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"positive\",\n \"refusal\": null,\n
|
||||
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 89,\n \"completion_tokens\":
|
||||
1,\n \"total_tokens\": 90,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_3683ee3deb\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 28 Jan 2026 23:51:29 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '323'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -157,10 +157,10 @@ async def test_anthropic_async_with_response_model():
"Say hello in French",
response_model=GreetingResponse
)
# When response_model is provided, the result is already a parsed Pydantic model instance
assert isinstance(result, GreetingResponse)
assert isinstance(result.greeting, str)
assert isinstance(result.language, str)
model = GreetingResponse.model_validate_json(result)
assert isinstance(model, GreetingResponse)
assert isinstance(model.greeting, str)
assert isinstance(model.language, str)


@pytest.mark.vcr()
@@ -799,131 +799,3 @@ def test_google_express_mode_works() -> None:
assert result.token_usage.prompt_tokens > 0
assert result.token_usage.completion_tokens > 0
assert result.token_usage.successful_requests >= 1


def test_gemini_2_0_model_detection():
"""Test that Gemini 2.0 models are properly detected."""
# Test Gemini 2.0 models
llm_2_0 = LLM(model="google/gemini-2.0-flash-001")
from crewai.llms.providers.gemini.completion import GeminiCompletion
assert isinstance(llm_2_0, GeminiCompletion)
assert llm_2_0.is_gemini_2_0 is True

llm_2_5 = LLM(model="google/gemini-2.5-flash")
assert isinstance(llm_2_5, GeminiCompletion)
assert llm_2_5.is_gemini_2_0 is True

# Test non-2.0 models
llm_1_5 = LLM(model="google/gemini-1.5-pro")
assert isinstance(llm_1_5, GeminiCompletion)
assert llm_1_5.is_gemini_2_0 is False


def test_add_property_ordering_to_schema():
"""Test that _add_property_ordering correctly adds propertyOrdering to schemas."""
from crewai.llms.providers.gemini.completion import GeminiCompletion

# Test simple object schema
simple_schema = {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"},
"email": {"type": "string"}
}
}

result = GeminiCompletion._add_property_ordering(simple_schema)

assert "propertyOrdering" in result
assert result["propertyOrdering"] == ["name", "age", "email"]

# Test nested object schema
nested_schema = {
"type": "object",
"properties": {
"user": {
"type": "object",
"properties": {
"name": {"type": "string"},
"contact": {
"type": "object",
"properties": {
"email": {"type": "string"},
"phone": {"type": "string"}
}
}
}
},
"id": {"type": "integer"}
}
}

result = GeminiCompletion._add_property_ordering(nested_schema)

assert "propertyOrdering" in result
assert result["propertyOrdering"] == ["user", "id"]
assert "propertyOrdering" in result["properties"]["user"]
assert result["properties"]["user"]["propertyOrdering"] == ["name", "contact"]
assert "propertyOrdering" in result["properties"]["user"]["properties"]["contact"]
assert result["properties"]["user"]["properties"]["contact"]["propertyOrdering"] == ["email", "phone"]


def test_gemini_2_0_response_model_with_property_ordering():
"""Test that Gemini 2.0 models include propertyOrdering in response schemas."""
from pydantic import BaseModel, Field

class TestResponse(BaseModel):
"""Test response model."""
name: str = Field(..., description="The name")
age: int = Field(..., description="The age")
email: str = Field(..., description="The email")

llm = LLM(model="google/gemini-2.0-flash-001")

# Prepare generation config with response model
config = llm._prepare_generation_config(response_model=TestResponse)

# Verify that the config has response_json_schema
assert hasattr(config, 'response_json_schema') or 'response_json_schema' in config.__dict__

# Get the schema
if hasattr(config, 'response_json_schema'):
schema = config.response_json_schema
else:
schema = config.__dict__.get('response_json_schema', {})

# Verify propertyOrdering is present for Gemini 2.0
assert "propertyOrdering" in schema
assert "name" in schema["propertyOrdering"]
assert "age" in schema["propertyOrdering"]
assert "email" in schema["propertyOrdering"]


def test_gemini_1_5_response_model_uses_response_schema():
"""Test that Gemini 1.5 models use response_schema parameter (not response_json_schema)."""
from pydantic import BaseModel, Field

class TestResponse(BaseModel):
"""Test response model."""
name: str = Field(..., description="The name")
age: int = Field(..., description="The age")

llm = LLM(model="google/gemini-1.5-pro")

# Prepare generation config with response model
config = llm._prepare_generation_config(response_model=TestResponse)

# Verify that the config uses response_schema (not response_json_schema)
assert hasattr(config, 'response_schema') or 'response_schema' in config.__dict__
assert not (hasattr(config, 'response_json_schema') and config.response_json_schema is not None)

# Get the schema
if hasattr(config, 'response_schema'):
schema = config.response_schema
else:
schema = config.__dict__.get('response_schema')

# For Gemini 1.5, response_schema should be the Pydantic model itself
# The SDK handles conversion internally
assert schema is TestResponse or isinstance(schema, type)
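The _add_property_ordering tests above expect a declaration-order propertyOrdering key to be added recursively to object schemas. A rough standalone sketch of that behaviour (illustrative only, not the GeminiCompletion implementation):

from typing import Any


def add_property_ordering(schema: dict[str, Any]) -> dict[str, Any]:
    # Record property declaration order for this object and recurse into nested objects.
    if schema.get("type") == "object" and "properties" in schema:
        schema["propertyOrdering"] = list(schema["properties"])
        for subschema in schema["properties"].values():
            add_property_ordering(subschema)
    return schema


example = {"type": "object", "properties": {"name": {"type": "string"}, "age": {"type": "integer"}}}
assert add_property_ordering(example)["propertyOrdering"] == ["name", "age"]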
@@ -540,9 +540,7 @@ def test_openai_streaming_with_response_model():
result = llm.call("Test question", response_model=TestResponse)

assert result is not None
assert isinstance(result, TestResponse)
assert result.answer == "test"
assert result.confidence == 0.95
assert isinstance(result, str)

assert mock_stream.called
call_kwargs = mock_stream.call_args[1]

@@ -2585,7 +2585,6 @@ def test_warning_long_term_memory_without_entity_memory():
goal="You research about math.",
backstory="You're an expert in research and you love to learn new things.",
allow_delegation=False,
verbose=True,
)

task1 = Task(
@@ -1,234 +0,0 @@
|
||||
"""Tests for prompt generation to prevent thought leakage.
|
||||
|
||||
These tests verify that:
|
||||
1. Agents without tools don't get ReAct format instructions
|
||||
2. The generated prompts don't encourage "Thought:" prefixes that leak into output
|
||||
3. Real LLM calls produce clean output without internal reasoning
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.llm import LLM
|
||||
from crewai.utilities.prompts import Prompts
|
||||
|
||||
|
||||
class TestNoToolsPromptGeneration:
|
||||
"""Tests for prompt generation when agent has no tools."""
|
||||
|
||||
def test_no_tools_uses_task_no_tools_slice(self) -> None:
|
||||
"""Test that agents without tools use task_no_tools slice instead of task."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.role = "Test Agent"
|
||||
mock_agent.goal = "Test goal"
|
||||
mock_agent.backstory = "Test backstory"
|
||||
|
||||
prompts = Prompts(
|
||||
has_tools=False,
|
||||
use_native_tool_calling=False,
|
||||
use_system_prompt=True,
|
||||
agent=mock_agent,
|
||||
)
|
||||
|
||||
result = prompts.task_execution()
|
||||
|
||||
# Verify it's a SystemPromptResult with system and user keys
|
||||
assert "system" in result
|
||||
assert "user" in result
|
||||
assert "prompt" in result
|
||||
|
||||
# The user prompt should NOT contain "Thought:" (ReAct format)
|
||||
assert "Thought:" not in result["user"]
|
||||
|
||||
# The user prompt should NOT mention tools
|
||||
assert "use the tools available" not in result["user"]
|
||||
assert "tools available" not in result["user"].lower()
|
||||
|
||||
# The system prompt should NOT contain ReAct format instructions
|
||||
assert "Thought:" not in result["system"]
|
||||
assert "Final Answer:" not in result["system"]
|
||||
|
||||
def test_no_tools_prompt_is_simple(self) -> None:
|
||||
"""Test that no-tools prompt is simple and direct."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.role = "Language Detector"
|
||||
mock_agent.goal = "Detect language"
|
||||
mock_agent.backstory = "Expert linguist"
|
||||
|
||||
prompts = Prompts(
|
||||
has_tools=False,
|
||||
use_native_tool_calling=False,
|
||||
use_system_prompt=True,
|
||||
agent=mock_agent,
|
||||
)
|
||||
|
||||
result = prompts.task_execution()
|
||||
|
||||
# Should contain the role playing info
|
||||
assert "Language Detector" in result["system"]
|
||||
|
||||
# User prompt should be simple with just the task
|
||||
assert "Current Task:" in result["user"]
|
||||
assert "Provide your complete response:" in result["user"]
|
||||
|
||||
def test_with_tools_uses_task_slice_with_react(self) -> None:
|
||||
"""Test that agents WITH tools use the task slice (ReAct format)."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.role = "Test Agent"
|
||||
mock_agent.goal = "Test goal"
|
||||
mock_agent.backstory = "Test backstory"
|
||||
|
||||
prompts = Prompts(
|
||||
has_tools=True,
|
||||
use_native_tool_calling=False,
|
||||
use_system_prompt=True,
|
||||
agent=mock_agent,
|
||||
)
|
||||
|
||||
result = prompts.task_execution()
|
||||
|
||||
# With tools and ReAct, the prompt SHOULD contain Thought:
|
||||
assert "Thought:" in result["user"]
|
||||
|
||||
def test_native_tools_uses_native_task_slice(self) -> None:
|
||||
"""Test that native tool calling uses native_task slice."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.role = "Test Agent"
|
||||
mock_agent.goal = "Test goal"
|
||||
mock_agent.backstory = "Test backstory"
|
||||
|
||||
prompts = Prompts(
|
||||
has_tools=True,
|
||||
use_native_tool_calling=True,
|
||||
use_system_prompt=True,
|
||||
agent=mock_agent,
|
||||
)
|
||||
|
||||
result = prompts.task_execution()
|
||||
|
||||
# Native tool calling should NOT have Thought: in user prompt
|
||||
assert "Thought:" not in result["user"]
|
||||
|
||||
# Should NOT have emotional manipulation
|
||||
assert "your job depends on it" not in result["user"]
|
||||
|
||||
|
||||
class TestNoThoughtLeakagePatterns:
|
||||
"""Tests to verify prompts don't encourage thought leakage."""
|
||||
|
||||
def test_no_job_depends_on_it_in_no_tools(self) -> None:
|
||||
"""Test that 'your job depends on it' is not in no-tools prompts."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.role = "Test"
|
||||
mock_agent.goal = "Test"
|
||||
mock_agent.backstory = "Test"
|
||||
|
||||
prompts = Prompts(
|
||||
has_tools=False,
|
||||
use_native_tool_calling=False,
|
||||
use_system_prompt=True,
|
||||
agent=mock_agent,
|
||||
)
|
||||
|
||||
result = prompts.task_execution()
|
||||
|
||||
full_prompt = result["prompt"]
|
||||
assert "your job depends on it" not in full_prompt.lower()
|
||||
assert "i must use these formats" not in full_prompt.lower()
|
||||
|
||||
def test_no_job_depends_on_it_in_native_task(self) -> None:
|
||||
"""Test that 'your job depends on it' is not in native task prompts."""
|
||||
mock_agent = MagicMock()
|
||||
mock_agent.role = "Test"
|
||||
mock_agent.goal = "Test"
|
||||
mock_agent.backstory = "Test"
|
||||
|
||||
prompts = Prompts(
|
||||
has_tools=True,
|
||||
use_native_tool_calling=True,
|
||||
use_system_prompt=True,
|
||||
agent=mock_agent,
|
||||
)
|
||||
|
||||
result = prompts.task_execution()
|
||||
|
||||
full_prompt = result["prompt"]
|
||||
assert "your job depends on it" not in full_prompt.lower()
|
||||
|
||||
|
||||
class TestRealLLMNoThoughtLeakage:
|
||||
"""Integration tests with real LLM calls to verify no thought leakage."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_without_tools_no_thought_in_output(self) -> None:
|
||||
"""Test that agent without tools produces clean output without 'Thought:' prefix."""
|
||||
agent = Agent(
|
||||
role="Language Detector",
|
||||
goal="Detect the language of text",
|
||||
backstory="You are an expert linguist who can identify languages.",
|
||||
tools=[], # No tools
|
||||
llm=LLM(model="gpt-4o-mini"),
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="What language is this text written in: 'Hello, how are you?'",
|
||||
expected_output="The detected language (e.g., English, Spanish, etc.)",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result is not None
|
||||
assert result.raw is not None
|
||||
|
||||
# The output should NOT start with "Thought:" or contain ReAct artifacts
|
||||
output = str(result.raw)
|
||||
assert not output.strip().startswith("Thought:")
|
||||
assert "Final Answer:" not in output
|
||||
assert "I now can give a great answer" not in output
|
||||
|
||||
# Should contain an actual answer about the language
|
||||
assert any(
|
||||
lang in output.lower()
|
||||
for lang in ["english", "en", "language"]
|
||||
)
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_simple_task_clean_output(self) -> None:
|
||||
"""Test that a simple task produces clean output without internal reasoning."""
|
||||
agent = Agent(
|
||||
role="Classifier",
|
||||
goal="Classify text sentiment",
|
||||
backstory="You classify text sentiment accurately.",
|
||||
tools=[],
|
||||
llm=LLM(model="gpt-4o-mini"),
|
||||
verbose=False,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Classify the sentiment of: 'I love this product!'",
|
||||
expected_output="One word: positive, negative, or neutral",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
|
||||
assert result is not None
|
||||
output = str(result.raw).strip().lower()
|
||||
|
||||
# Output should be clean - just the classification
|
||||
assert not output.startswith("thought:")
|
||||
assert "final answer:" not in output
|
||||
|
||||
# Should contain the actual classification
|
||||
assert any(
|
||||
sentiment in output
|
||||
for sentiment in ["positive", "negative", "neutral"]
|
||||
)
|
||||
@@ -1,3 +1,3 @@
"""CrewAI development tools."""

__version__ = "1.9.2"
__version__ = "1.9.0"