Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-25 08:48:31 +00:00)

Compare commits: `fix/clone_` ... `bugfix/llm` (8 commits)

| SHA1 |
| --- |
| a8124cda06 |
| 7bed63a693 |
| c471f67074 |
| 973a860415 |
| 2709a9205a |
| d19d7b01ec |
| a3ad2c1957 |
| 9f6b4cefd3 |
@@ -43,7 +43,7 @@ Think of an agent as a specialized team member with specific skills, expertise,
| **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
| **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
| **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
| **Embedder Config** _(optional)_ | `embedder_config` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
| **Embedder** _(optional)_ | `embedder` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
| **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
| **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
@@ -152,7 +152,7 @@ agent = Agent(
    use_system_prompt=True,  # Default: True
    tools=[SerperDevTool()],  # Optional: List of tools
    knowledge_sources=None,  # Optional: List of knowledge sources
    embedder_config=None,  # Optional: Custom embedder configuration
    embedder=None,  # Optional: Custom embedder configuration
    system_template=None,  # Optional: Custom system prompt template
    prompt_template=None,  # Optional: Custom prompt template
    response_template=None,  # Optional: Custom response template
@@ -324,6 +324,13 @@ agent = Agent(
    verbose=True,
    allow_delegation=False,
    llm=gemini_llm,
    embedder={
        "provider": "google",
        "config": {
            "model": "models/text-embedding-004",
            "api_key": GEMINI_API_KEY,
        }
    }
)

task = Task(
@@ -33,11 +33,12 @@ crew = Crew(
| :------------------------------- | :---------------- | :---------------------------- | :------------------------------------------------------------------------------------------------------------------- |
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
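To ground the table above, here is a minimal, hedged sketch of a task definition that exercises a few of these attributes; the agent, the `{topic}` placeholder, and the output path are illustrative only and not taken from this diff.

```python
from crewai import Agent, Task

# Illustrative agent so the `agent` attribute has something to point at.
researcher = Agent(
    role="Researcher",
    goal="Summarize recent AI developments",
    backstory="An analyst who tracks AI news.",
)

summary_task = Task(
    description="Summarize the three most important AI news items about {topic}.",
    expected_output="A bullet list with three items, one sentence each.",
    agent=researcher,           # Optional: agent responsible for the task
    async_execution=False,      # Optional: defaults to False
    human_input=False,          # Optional: defaults to False
    output_file="summary.md",   # Optional: where the output is stored
)
```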
@@ -61,6 +61,7 @@ class Agent(BaseAgent):
tools: Tools at agents disposal
step_callback: Callback to be executed after each step of the agent execution.
knowledge_sources: Knowledge sources for the agent.
embedder: Embedder configuration for the agent.
"""

_times_executed: int = PrivateAttr(default=0)

@@ -122,17 +123,10 @@ class Agent(BaseAgent):
default="safe",
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
)
embedder_config: Optional[Dict[str, Any]] = Field(
embedder: Optional[Dict[str, Any]] = Field(
default=None,
description="Embedder configuration for the agent.",
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the agent.",
)
_knowledge: Optional[Knowledge] = PrivateAttr(
default=None,
)

@model_validator(mode="after")
def post_init_setup(self):

@@ -163,10 +157,11 @@ class Agent(BaseAgent):
if isinstance(self.knowledge_sources, list) and all(
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
):
self._knowledge = Knowledge(
self.knowledge = Knowledge(
sources=self.knowledge_sources,
embedder_config=self.embedder_config,
embedder=self.embedder,
collection_name=knowledge_agent_name,
storage=self.knowledge_storage or None,
)
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")

@@ -225,8 +220,8 @@ class Agent(BaseAgent):
if memory.strip() != "":
task_prompt += self.i18n.slice("memory").format(memory=memory)

if self._knowledge:
agent_knowledge_snippets = self._knowledge.query([task.prompt()])
if self.knowledge:
agent_knowledge_snippets = self.knowledge.query([task.prompt()])
if agent_knowledge_snippets:
agent_knowledge_context = extract_knowledge_context(
agent_knowledge_snippets
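These hunks replace the private `_knowledge` attribute and the `embedder_config` field on `Agent` with public `knowledge` and `embedder` fields, matching the docs tables earlier in this compare. A minimal sketch of the new spelling; the Google embedder dict mirrors the docs example above, the API key is a placeholder, and valid embedder credentials are assumed whenever the knowledge source actually gets embedded.

```python
from crewai import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

string_source = StringKnowledgeSource(
    content="Brandon's favorite color is red and he likes Mexican food."
)

agent = Agent(
    role="Information Agent",
    goal="Answer questions from the provided knowledge",
    backstory="You have access to specific knowledge sources.",
    knowledge_sources=[string_source],
    embedder={  # formerly `embedder_config`
        "provider": "google",
        "config": {
            "model": "models/text-embedding-004",
            "api_key": "<GEMINI_API_KEY>",  # placeholder, not a real key
        },
    },
)
```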
@@ -18,6 +18,8 @@ from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import I18N, Logger, RPMController

@@ -48,6 +50,8 @@ class BaseAgent(ABC, BaseModel):
cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
max_tokens: Maximum number of tokens for the agent to generate in a response.
knowledge_sources: Knowledge sources for the agent.
knowledge_storage: Custom knowledge storage for the agent.

Methods:

@@ -130,6 +134,17 @@ class BaseAgent(ABC, BaseModel):
max_tokens: Optional[int] = Field(
default=None, description="Maximum number of tokens for the agent's execution."
)
knowledge: Optional[Knowledge] = Field(
default=None, description="Knowledge for the agent."
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the agent.",
)
knowledge_storage: Optional[Any] = Field(
default=None,
description="Custom knowledge storage for the agent.",
)

@model_validator(mode="before")
@classmethod
@@ -256,13 +271,44 @@ class BaseAgent(ABC, BaseModel):
"tools_handler",
"cache_handler",
"llm",
"knowledge_sources",
"knowledge_storage",
"knowledge",
}

# Copy llm and clear callbacks
# Copy llm
existing_llm = shallow_copy(self.llm)
copied_knowledge = shallow_copy(self.knowledge)
copied_knowledge_storage = shallow_copy(self.knowledge_storage)
# Properly copy knowledge sources if they exist
existing_knowledge_sources = None
if self.knowledge_sources:
# Create a shared storage instance for all knowledge sources
shared_storage = (
self.knowledge_sources[0].storage if self.knowledge_sources else None
)

existing_knowledge_sources = []
for source in self.knowledge_sources:
copied_source = (
source.model_copy()
if hasattr(source, "model_copy")
else shallow_copy(source)
)
# Ensure all copied sources use the same storage instance
copied_source.storage = shared_storage
existing_knowledge_sources.append(copied_source)

copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None}
copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
copied_agent = type(self)(
**copied_data,
llm=existing_llm,
tools=self.tools,
knowledge_sources=existing_knowledge_sources,
knowledge=copied_knowledge,
knowledge_storage=copied_knowledge_storage,
)

return copied_agent
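The copy logic above now carries `knowledge`, `knowledge_sources`, and `knowledge_storage` over to the clone and points every copied source at one shared storage instance. A rough usage sketch with the default embedder; an `OPENAI_API_KEY` (or other valid embedder credentials) is assumed for when the source is embedded.

```python
from crewai import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="Brandon's favorite color is red.")
agent = Agent(
    role="Information Agent",
    goal="Answer questions from the provided knowledge",
    backstory="You have access to specific knowledge sources.",
    knowledge_sources=[source],
)

agent_copy = agent.copy()

# The clone keeps the knowledge-related fields ...
assert agent_copy.knowledge_sources is not None
assert len(agent_copy.knowledge_sources) == 1

# ... and all copied sources point at a single shared storage instance.
assert len({id(s.storage) for s in agent_copy.knowledge_sources}) == 1
```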
@@ -95,18 +95,29 @@ class CrewAgentExecutorMixin:
pass

def _ask_human_input(self, final_answer: str) -> str:
"""Prompt human input for final decision making."""
"""Prompt human input with mode-appropriate messaging."""
self._printer.print(
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
)

self._printer.print(
content=(
# Training mode prompt (single iteration)
if self.crew and getattr(self.crew, "_train", False):
prompt = (
"\n\n=====\n"
"## Please provide feedback on the Final Result and the Agent's actions. "
"Respond with 'looks good' or a similar phrase when you're satisfied.\n"
"## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
"This will be used to train better versions of the agent.\n"
"Please provide detailed feedback about the result quality and reasoning process.\n"
"=====\n"
),
color="bold_yellow",
)
)
# Regular human-in-the-loop prompt (multiple iterations)
else:
prompt = (
"\n\n=====\n"
"## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
"Respond with 'looks good' to accept or provide specific improvement requests.\n"
"You can provide multiple rounds of feedback until satisfied.\n"
"=====\n"
)

self._printer.print(content=prompt, color="bold_yellow")
return input()
@@ -100,6 +100,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

try:
formatted_answer = self._invoke_loop()
except AssertionError:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
raise
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors

@@ -115,7 +121,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._create_long_term_memory(formatted_answer)
return {"output": formatted_answer.output}

def _invoke_loop(self):
def _invoke_loop(self) -> AgentFinish:
"""
Main loop to invoke the agent's thought process until it reaches a conclusion
or the maximum number of iterations is reached.

@@ -161,6 +167,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
finally:
self.iterations += 1

# During the invoke loop, formatted_answer alternates between AgentAction
# (when the agent is using tools) and eventually becomes AgentFinish
# (when the agent reaches a final answer). This assertion confirms we've
# reached a final answer and helps type checking understand this transition.
assert isinstance(formatted_answer, AgentFinish)
self._show_logs(formatted_answer)
return formatted_answer
@@ -292,8 +303,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._printer.print(
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
description = (
getattr(self.task, "description") if self.task else "Not Found"
)
self._printer.print(
content=f"\033[95m## Task:\033[00m \033[92m{self.task.description}\033[00m"
content=f"\033[95m## Task:\033[00m \033[92m{description}\033[00m"
)

def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
@@ -418,58 +432,50 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
def _handle_crew_training_output(
|
||||
self, result: AgentFinish, human_feedback: str | None = None
|
||||
self, result: AgentFinish, human_feedback: Optional[str] = None
|
||||
) -> None:
|
||||
"""Function to handle the process of the training data."""
|
||||
"""Handle the process of saving training data."""
|
||||
agent_id = str(self.agent.id) # type: ignore
|
||||
train_iteration = (
|
||||
getattr(self.crew, "_train_iteration", None) if self.crew else None
|
||||
)
|
||||
|
||||
if train_iteration is None or not isinstance(train_iteration, int):
|
||||
self._printer.print(
|
||||
content="Invalid or missing train iteration. Cannot save training data.",
|
||||
color="red",
|
||||
)
|
||||
return
|
||||
|
||||
# Load training data
|
||||
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
||||
training_data = training_handler.load()
|
||||
training_data = training_handler.load() or {}
|
||||
|
||||
# Check if training data exists, human input is not requested, and self.crew is valid
|
||||
if training_data and not self.ask_for_human_input:
|
||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||
train_iteration = self.crew._train_iteration
|
||||
if agent_id in training_data and isinstance(train_iteration, int):
|
||||
training_data[agent_id][train_iteration][
|
||||
"improved_output"
|
||||
] = result.output
|
||||
training_handler.save(training_data)
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Invalid train iteration type or agent_id not in training data.",
|
||||
color="red",
|
||||
)
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Crew is None or does not have _train_iteration attribute.",
|
||||
color="red",
|
||||
)
|
||||
# Initialize or retrieve agent's training data
|
||||
agent_training_data = training_data.get(agent_id, {})
|
||||
|
||||
if self.ask_for_human_input and human_feedback is not None:
|
||||
training_data = {
|
||||
if human_feedback is not None:
|
||||
# Save initial output and human feedback
|
||||
agent_training_data[train_iteration] = {
|
||||
"initial_output": result.output,
|
||||
"human_feedback": human_feedback,
|
||||
"agent": agent_id,
|
||||
"agent_role": self.agent.role, # type: ignore
|
||||
}
|
||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||
train_iteration = self.crew._train_iteration
|
||||
if isinstance(train_iteration, int):
|
||||
CrewTrainingHandler(TRAINING_DATA_FILE).append(
|
||||
train_iteration, agent_id, training_data
|
||||
)
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Invalid train iteration type. Expected int.",
|
||||
color="red",
|
||||
)
|
||||
else:
|
||||
# Save improved output
|
||||
if train_iteration in agent_training_data:
|
||||
agent_training_data[train_iteration]["improved_output"] = result.output
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Crew is None or does not have _train_iteration attribute.",
|
||||
content=(
|
||||
f"No existing training data for agent {agent_id} and iteration "
|
||||
f"{train_iteration}. Cannot save improved output."
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
return
|
||||
|
||||
# Update the training data and save
|
||||
training_data[agent_id] = agent_training_data
|
||||
training_handler.save(training_data)
|
||||
|
||||
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
|
||||
prompt = prompt.replace("{input}", inputs["input"])
|
||||
@@ -485,82 +491,103 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
return {"role": role, "content": prompt}
|
||||
|
||||
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
|
||||
"""
|
||||
Handles the human feedback loop, allowing the user to provide feedback
|
||||
on the agent's output and determining if additional iterations are needed.
|
||||
"""Handle human feedback with different flows for training vs regular use.
|
||||
|
||||
Parameters:
|
||||
formatted_answer (AgentFinish): The initial output from the agent.
|
||||
Args:
|
||||
formatted_answer: The initial AgentFinish result to get feedback on
|
||||
|
||||
Returns:
|
||||
AgentFinish: The final output after incorporating human feedback.
|
||||
AgentFinish: The final answer after processing feedback
|
||||
"""
|
||||
human_feedback = self._ask_human_input(formatted_answer.output)
|
||||
|
||||
if self._is_training_mode():
|
||||
return self._handle_training_feedback(formatted_answer, human_feedback)
|
||||
|
||||
return self._handle_regular_feedback(formatted_answer, human_feedback)
|
||||
|
||||
def _is_training_mode(self) -> bool:
|
||||
"""Check if crew is in training mode."""
|
||||
return bool(self.crew and self.crew._train)
|
||||
|
||||
def _handle_training_feedback(
|
||||
self, initial_answer: AgentFinish, feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process feedback for training scenarios with single iteration."""
|
||||
self._printer.print(
|
||||
content="\nProcessing training feedback.\n",
|
||||
color="yellow",
|
||||
)
|
||||
self._handle_crew_training_output(initial_answer, feedback)
|
||||
self.messages.append(self._format_msg(f"Feedback: {feedback}"))
|
||||
improved_answer = self._invoke_loop()
|
||||
self._handle_crew_training_output(improved_answer)
|
||||
self.ask_for_human_input = False
|
||||
return improved_answer
|
||||
|
||||
def _handle_regular_feedback(
|
||||
self, current_answer: AgentFinish, initial_feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process feedback for regular use with potential multiple iterations."""
|
||||
feedback = initial_feedback
|
||||
answer = current_answer
|
||||
|
||||
while self.ask_for_human_input:
|
||||
human_feedback = self._ask_human_input(formatted_answer.output)
|
||||
response = self._get_llm_feedback_response(feedback)
|
||||
|
||||
if self.crew and self.crew._train:
|
||||
self._handle_crew_training_output(formatted_answer, human_feedback)
|
||||
|
||||
# Make an LLM call to verify if additional changes are requested based on human feedback
|
||||
additional_changes_prompt = self._i18n.slice(
|
||||
"human_feedback_classification"
|
||||
).format(feedback=human_feedback)
|
||||
|
||||
retry_count = 0
|
||||
llm_call_successful = False
|
||||
additional_changes_response = None
|
||||
|
||||
while retry_count < MAX_LLM_RETRY and not llm_call_successful:
|
||||
try:
|
||||
additional_changes_response = (
|
||||
self.llm.call(
|
||||
[
|
||||
self._format_msg(
|
||||
additional_changes_prompt, role="system"
|
||||
)
|
||||
],
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
.strip()
|
||||
.lower()
|
||||
)
|
||||
llm_call_successful = True
|
||||
except Exception as e:
|
||||
retry_count += 1
|
||||
|
||||
self._printer.print(
|
||||
content=f"Error during LLM call to classify human feedback: {e}. Retrying... ({retry_count}/{MAX_LLM_RETRY})",
|
||||
color="red",
|
||||
)
|
||||
|
||||
if not llm_call_successful:
|
||||
self._printer.print(
|
||||
content="Error processing feedback after multiple attempts.",
|
||||
color="red",
|
||||
)
|
||||
if not self._feedback_requires_changes(response):
|
||||
self.ask_for_human_input = False
|
||||
break
|
||||
|
||||
if additional_changes_response == "false":
|
||||
self.ask_for_human_input = False
|
||||
elif additional_changes_response == "true":
|
||||
self.ask_for_human_input = True
|
||||
# Add human feedback to messages
|
||||
self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
|
||||
# Invoke the loop again with updated messages
|
||||
formatted_answer = self._invoke_loop()
|
||||
|
||||
if self.crew and self.crew._train:
|
||||
self._handle_crew_training_output(formatted_answer)
|
||||
else:
|
||||
# Unexpected response
|
||||
self._printer.print(
|
||||
content=f"Unexpected response from LLM: '{additional_changes_response}'. Assuming no additional changes requested.",
|
||||
color="red",
|
||||
)
|
||||
self.ask_for_human_input = False
|
||||
answer = self._process_feedback_iteration(feedback)
|
||||
feedback = self._ask_human_input(answer.output)
|
||||
|
||||
return formatted_answer
|
||||
return answer
|
||||
|
||||
def _get_llm_feedback_response(self, feedback: str) -> Optional[str]:
|
||||
"""Get LLM classification of whether feedback requires changes."""
|
||||
prompt = self._i18n.slice("human_feedback_classification").format(
|
||||
feedback=feedback
|
||||
)
|
||||
message = self._format_msg(prompt, role="system")
|
||||
|
||||
for retry in range(MAX_LLM_RETRY):
|
||||
try:
|
||||
response = self.llm.call([message], callbacks=self.callbacks)
|
||||
return response.strip().lower() if response else None
|
||||
except Exception as error:
|
||||
self._log_feedback_error(retry, error)
|
||||
|
||||
self._log_max_retries_exceeded()
|
||||
return None
|
||||
|
||||
def _feedback_requires_changes(self, response: Optional[str]) -> bool:
|
||||
"""Determine if feedback response indicates need for changes."""
|
||||
return response == "true" if response else False
|
||||
|
||||
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
|
||||
"""Process a single feedback iteration."""
|
||||
self.messages.append(self._format_msg(f"Feedback: {feedback}"))
|
||||
return self._invoke_loop()
|
||||
|
||||
def _log_feedback_error(self, retry_count: int, error: Exception) -> None:
|
||||
"""Log feedback processing errors."""
|
||||
self._printer.print(
|
||||
content=(
|
||||
f"Error processing feedback: {error}. "
|
||||
f"Retrying... ({retry_count + 1}/{MAX_LLM_RETRY})"
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
|
||||
def _log_max_retries_exceeded(self) -> None:
|
||||
"""Log when max retries for feedback processing are exceeded."""
|
||||
self._printer.print(
|
||||
content=(
|
||||
f"Failed to process feedback after {MAX_LLM_RETRY} attempts. "
|
||||
"Ending feedback loop."
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
|
||||
def _handle_max_iterations_exceeded(self, formatted_answer):
|
||||
"""
|
||||
|
||||
@@ -4,6 +4,7 @@ import re
import uuid
import warnings
from concurrent.futures import Future
from copy import copy as shallow_copy
from hashlib import md5
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

@@ -210,8 +211,9 @@ class Crew(BaseModel):
default=None,
description="LLM used to handle chatting with the crew.",
)
_knowledge: Optional[Knowledge] = PrivateAttr(
knowledge: Optional[Knowledge] = Field(
default=None,
description="Knowledge for the crew.",
)

@field_validator("id", mode="before")

@@ -289,7 +291,7 @@ class Crew(BaseModel):
if isinstance(self.knowledge_sources, list) and all(
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
):
self._knowledge = Knowledge(
self.knowledge = Knowledge(
sources=self.knowledge_sources,
embedder_config=self.embedder,
collection_name="crew",
@@ -492,21 +494,26 @@ class Crew(BaseModel):
|
||||
train_crew = self.copy()
|
||||
train_crew._setup_for_training(filename)
|
||||
|
||||
for n_iteration in range(n_iterations):
|
||||
train_crew._train_iteration = n_iteration
|
||||
train_crew.kickoff(inputs=inputs)
|
||||
try:
|
||||
for n_iteration in range(n_iterations):
|
||||
train_crew._train_iteration = n_iteration
|
||||
train_crew.kickoff(inputs=inputs)
|
||||
|
||||
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
||||
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
||||
|
||||
for agent in train_crew.agents:
|
||||
if training_data.get(str(agent.id)):
|
||||
result = TaskEvaluator(agent).evaluate_training_data(
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
for agent in train_crew.agents:
|
||||
if training_data.get(str(agent.id)):
|
||||
result = TaskEvaluator(agent).evaluate_training_data(
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
except Exception as e:
|
||||
self._logger.log("error", f"Training failed: {e}", color="red")
|
||||
CrewTrainingHandler(TRAINING_DATA_FILE).clear()
|
||||
CrewTrainingHandler(filename).clear()
|
||||
raise
|
||||
|
||||
def kickoff(
|
||||
self,
|
||||
@@ -991,8 +998,8 @@ class Crew(BaseModel):
return result

def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
if self._knowledge:
return self._knowledge.query(query)
if self.knowledge:
return self.knowledge.query(query)
return None

def fetch_inputs(self) -> Set[str]:

@@ -1036,6 +1043,8 @@ class Crew(BaseModel):
"_telemetry",
"agents",
"tasks",
"knowledge_sources",
"knowledge",
}

cloned_agents = [agent.copy() for agent in self.agents]

@@ -1043,6 +1052,9 @@ class Crew(BaseModel):
task_mapping = {}

cloned_tasks = []
existing_knowledge_sources = shallow_copy(self.knowledge_sources)
existing_knowledge = shallow_copy(self.knowledge)

for task in self.tasks:
cloned_task = task.copy(cloned_agents, task_mapping)
cloned_tasks.append(cloned_task)

@@ -1062,7 +1074,13 @@ class Crew(BaseModel):
copied_data.pop("agents", None)
copied_data.pop("tasks", None)

copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)
copied_crew = Crew(
**copied_data,
agents=cloned_agents,
tasks=cloned_tasks,
knowledge_sources=existing_knowledge_sources,
knowledge=existing_knowledge,
)

return copied_crew
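Analogously to the agent change, `Crew.copy()` now excludes `knowledge` and `knowledge_sources` from `model_dump()` and hands shallow copies of both to the cloned crew. A hedged sketch that mirrors the test added later in this compare; valid embedder credentials are assumed if the knowledge source is embedded.

```python
from crewai import Agent, Crew, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="Brandon's favorite color is red.")
researcher = Agent(
    role="Researcher",
    goal="Answer questions",
    backstory="Knows the provided facts.",
)

crew = Crew(
    agents=[researcher],
    tasks=[Task(description="test", expected_output="test", agent=researcher)],
    knowledge_sources=[source],
)

crew_copy = crew.copy()
assert crew_copy.knowledge_sources == crew.knowledge_sources
assert len(crew_copy.agents) == len(crew.agents)
assert len(crew_copy.tasks) == len(crew.tasks)
```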
@@ -15,20 +15,20 @@ class Knowledge(BaseModel):
Args:
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
storage: Optional[KnowledgeStorage] = Field(default=None)
embedder_config: Optional[Dict[str, Any]] = None
embedder: Optional[Dict[str, Any]] = None
"""

sources: List[BaseKnowledgeSource] = Field(default_factory=list)
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: Optional[KnowledgeStorage] = Field(default=None)
embedder_config: Optional[Dict[str, Any]] = None
embedder: Optional[Dict[str, Any]] = None
collection_name: Optional[str] = None

def __init__(
self,
collection_name: str,
sources: List[BaseKnowledgeSource],
embedder_config: Optional[Dict[str, Any]] = None,
embedder: Optional[Dict[str, Any]] = None,
storage: Optional[KnowledgeStorage] = None,
**data,
):

@@ -37,25 +37,23 @@ class Knowledge(BaseModel):
self.storage = storage
else:
self.storage = KnowledgeStorage(
embedder_config=embedder_config, collection_name=collection_name
embedder=embedder, collection_name=collection_name
)
self.sources = sources
self.storage.initialize_knowledge_storage()
for source in sources:
source.storage = self.storage
source.add()
self._add_sources()

def query(self, query: List[str], limit: int = 3) -> List[Dict[str, Any]]:
"""
Query across all knowledge sources to find the most relevant information.
Returns the top_k most relevant chunks.

Raises:
ValueError: If storage is not initialized.
"""
if self.storage is None:
raise ValueError("Storage is not initialized.")

results = self.storage.search(
query,
limit,

@@ -63,6 +61,9 @@ class Knowledge(BaseModel):
return results

def _add_sources(self):
for source in self.sources:
source.storage = self.storage
source.add()
try:
for source in self.sources:
source.storage = self.storage
source.add()
except Exception as e:
raise e
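With the constructor above, `Knowledge` takes the renamed `embedder` argument, wires each source to its storage via `_add_sources()`, and `query()` returns up to `limit` relevant chunks. A minimal sketch; with `embedder=None` the default OpenAI embedding function is used, so an `OPENAI_API_KEY` is assumed when the source is embedded.

```python
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(content="Brandon's favorite color is red.")

knowledge = Knowledge(
    collection_name="example",
    sources=[source],
    embedder=None,  # falls back to the default embedding function
)

results = knowledge.query(["What is Brandon's favorite color?"], limit=3)
```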
@@ -29,7 +29,13 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
def validate_file_path(cls, v, info):
"""Validate that at least one of file_path or file_paths is provided."""
# Single check if both are None, O(1) instead of nested conditions
if v is None and info.data.get("file_path" if info.field_name == "file_paths" else "file_paths") is None:
if (
v is None
and info.data.get(
"file_path" if info.field_name == "file_paths" else "file_paths"
)
is None
):
raise ValueError("Either file_path or file_paths must be provided")
return v
@@ -48,11 +48,11 @@ class KnowledgeStorage(BaseKnowledgeStorage):

def __init__(
self,
embedder_config: Optional[Dict[str, Any]] = None,
embedder: Optional[Dict[str, Any]] = None,
collection_name: Optional[str] = None,
):
self.collection_name = collection_name
self._set_embedder_config(embedder_config)
self._set_embedder_config(embedder)

def search(
self,

@@ -99,7 +99,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
)
if self.app:
self.collection = self.app.get_or_create_collection(
name=collection_name, embedding_function=self.embedder_config
name=collection_name, embedding_function=self.embedder
)
else:
raise Exception("Vector Database Client not initialized")

@@ -187,17 +187,15 @@ class KnowledgeStorage(BaseKnowledgeStorage):
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
)

def _set_embedder_config(
self, embedder_config: Optional[Dict[str, Any]] = None
) -> None:
def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
"""Set the embedding configuration for the knowledge storage.

Args:
embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
If None or empty, defaults to the default embedding function.
"""
self.embedder_config = (
EmbeddingConfigurator().configure_embedder(embedder_config)
if embedder_config
self.embedder = (
EmbeddingConfigurator().configure_embedder(embedder)
if embedder
else self._create_default_embedding_function()
)
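`KnowledgeStorage` mirrors the rename: both the constructor and `_set_embedder_config` now take `embedder`, and the configured function is stored on `self.embedder`. A small sketch, assuming the provider/config dict shape used in the docs example earlier in this compare; the dict keys are an assumption, not confirmed by this diff.

```python
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage

storage = KnowledgeStorage(
    embedder={  # assumed dict shape; None falls back to the default OpenAI function
        "provider": "openai",
        "config": {"model": "text-embedding-3-small"},
    },
    collection_name="agent_knowledge",
)
storage.initialize_knowledge_storage()
```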
@@ -133,6 +133,7 @@ class LLM:
logprobs: Optional[int] = None,
top_logprobs: Optional[int] = None,
base_url: Optional[str] = None,
api_base: Optional[str] = None,
api_version: Optional[str] = None,
api_key: Optional[str] = None,
callbacks: List[Any] = [],

@@ -152,6 +153,7 @@ class LLM:
self.logprobs = logprobs
self.top_logprobs = top_logprobs
self.base_url = base_url
self.api_base = api_base
self.api_version = api_version
self.api_key = api_key
self.callbacks = callbacks

@@ -232,7 +234,8 @@ class LLM:
"seed": self.seed,
"logprobs": self.logprobs,
"top_logprobs": self.top_logprobs,
"api_base": self.base_url,
"api_base": self.api_base,
"base_url": self.base_url,
"api_version": self.api_version,
"api_key": self.api_key,
"stream": False,
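The constructor now accepts `api_base` alongside `base_url` and forwards both to the completion call instead of reusing `base_url` for `api_base`. A hedged sketch of pointing the client at a self-hosted, OpenAI-compatible endpoint; the model name, URL, and key are placeholders.

```python
from crewai import LLM

llm = LLM(
    model="openai/llama-3.1-8b-instruct",  # placeholder model name
    base_url="http://localhost:8000/v1",   # placeholder endpoint
    api_base="http://localhost:8000/v1",   # same endpoint, kept in sync
    api_key="not-a-real-key",              # placeholder
)
```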
@@ -431,7 +431,9 @@ class Task(BaseModel):
|
||||
content = (
|
||||
json_output
|
||||
if json_output
|
||||
else pydantic_output.model_dump_json() if pydantic_output else result
|
||||
else pydantic_output.model_dump_json()
|
||||
if pydantic_output
|
||||
else result
|
||||
)
|
||||
self._save_file(content)
|
||||
|
||||
@@ -452,7 +454,7 @@ class Task(BaseModel):
|
||||
return "\n".join(tasks_slices)
|
||||
|
||||
def interpolate_inputs_and_add_conversation_history(
|
||||
self, inputs: Dict[str, Union[str, int, float]]
|
||||
self, inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]]
|
||||
) -> None:
|
||||
"""Interpolate inputs into the task description, expected output, and output file path.
|
||||
Add conversation history if present.
|
||||
@@ -524,7 +526,9 @@ class Task(BaseModel):
|
||||
)
|
||||
|
||||
def interpolate_only(
|
||||
self, input_string: Optional[str], inputs: Dict[str, Union[str, int, float]]
|
||||
self,
|
||||
input_string: Optional[str],
|
||||
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
|
||||
) -> str:
|
||||
"""Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
|
||||
|
||||
@@ -532,17 +536,39 @@ class Task(BaseModel):
|
||||
input_string: The string containing template variables to interpolate.
|
||||
Can be None or empty, in which case an empty string is returned.
|
||||
inputs: Dictionary mapping template variables to their values.
|
||||
Supported value types are strings, integers, and floats.
|
||||
If input_string is empty or has no placeholders, inputs can be empty.
|
||||
Supported value types are strings, integers, floats, and dicts/lists
|
||||
containing only these types and other nested dicts/lists.
|
||||
|
||||
Returns:
|
||||
The interpolated string with all template variables replaced with their values.
|
||||
Empty string if input_string is None or empty.
|
||||
|
||||
Raises:
|
||||
ValueError: If a required template variable is missing from inputs.
|
||||
KeyError: If a template variable is not found in the inputs dictionary.
|
||||
ValueError: If a value contains unsupported types
|
||||
"""
|
||||
|
||||
# Validation function for recursive type checking
|
||||
def validate_type(value: Any) -> None:
|
||||
if value is None:
|
||||
return
|
||||
if isinstance(value, (str, int, float, bool)):
|
||||
return
|
||||
if isinstance(value, (dict, list)):
|
||||
for item in value.values() if isinstance(value, dict) else value:
|
||||
validate_type(item)
|
||||
return
|
||||
raise ValueError(
|
||||
f"Unsupported type {type(value).__name__} in inputs. "
|
||||
"Only str, int, float, bool, dict, and list are allowed."
|
||||
)
|
||||
|
||||
# Validate all input values
|
||||
for key, value in inputs.items():
|
||||
try:
|
||||
validate_type(value)
|
||||
except ValueError as e:
|
||||
raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
|
||||
|
||||
if input_string is None or not input_string:
|
||||
return ""
|
||||
if "{" not in input_string and "}" not in input_string:
|
||||
@@ -551,15 +577,7 @@ class Task(BaseModel):
|
||||
raise ValueError(
|
||||
"Inputs dictionary cannot be empty when interpolating variables"
|
||||
)
|
||||
|
||||
try:
|
||||
# Validate input types
|
||||
for key, value in inputs.items():
|
||||
if not isinstance(value, (str, int, float)):
|
||||
raise ValueError(
|
||||
f"Value for key '{key}' must be a string, integer, or float, got {type(value).__name__}"
|
||||
)
|
||||
|
||||
escaped_string = input_string.replace("{", "{{").replace("}", "}}")
|
||||
|
||||
for key in inputs.keys():
|
||||
|
||||
@@ -43,7 +43,6 @@ class EmbeddingConfigurator:
raise Exception(
f"Unsupported embedding provider: {provider}, supported providers: {list(self.embedding_functions.keys())}"
)

return self.embedding_functions[provider](config, model_name)

@staticmethod
@@ -92,13 +92,34 @@ class TaskEvaluator:
|
||||
"""
|
||||
|
||||
output_training_data = training_data[agent_id]
|
||||
|
||||
final_aggregated_data = ""
|
||||
for _, data in output_training_data.items():
|
||||
|
||||
for iteration, data in output_training_data.items():
|
||||
improved_output = data.get("improved_output")
|
||||
initial_output = data.get("initial_output")
|
||||
human_feedback = data.get("human_feedback")
|
||||
|
||||
if not all([improved_output, initial_output, human_feedback]):
|
||||
missing_fields = [
|
||||
field
|
||||
for field in ["improved_output", "initial_output", "human_feedback"]
|
||||
if not data.get(field)
|
||||
]
|
||||
error_msg = (
|
||||
f"Critical training data error: Missing fields ({', '.join(missing_fields)}) "
|
||||
f"for agent {agent_id} in iteration {iteration}.\n"
|
||||
"This indicates a broken training process. "
|
||||
"Cannot proceed with evaluation.\n"
|
||||
"Please check your training implementation."
|
||||
)
|
||||
raise ValueError(error_msg)
|
||||
|
||||
final_aggregated_data += (
|
||||
f"Initial Output:\n{data.get('initial_output', '')}\n\n"
|
||||
f"Human Feedback:\n{data.get('human_feedback', '')}\n\n"
|
||||
f"Improved Output:\n{data.get('improved_output', '')}\n\n"
|
||||
f"Iteration: {iteration}\n"
|
||||
f"Initial Output:\n{initial_output}\n\n"
|
||||
f"Human Feedback:\n{human_feedback}\n\n"
|
||||
f"Improved Output:\n{improved_output}\n\n"
|
||||
"------------------------------------------------\n\n"
|
||||
)
|
||||
|
||||
evaluation_query = (
|
||||
|
||||
@@ -53,6 +53,7 @@ def create_llm(
timeout: Optional[float] = getattr(llm_value, "timeout", None)
api_key: Optional[str] = getattr(llm_value, "api_key", None)
base_url: Optional[str] = getattr(llm_value, "base_url", None)
api_base: Optional[str] = getattr(llm_value, "api_base", None)

created_llm = LLM(
model=model,

@@ -62,6 +63,7 @@ def create_llm(
timeout=timeout,
api_key=api_key,
base_url=base_url,
api_base=api_base,
)
return created_llm
except Exception as e:

@@ -101,8 +103,18 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]:
callbacks: List[Any] = []

# Optional base URL from env
api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
if api_base:
base_url = (
os.environ.get("BASE_URL")
or os.environ.get("OPENAI_API_BASE")
or os.environ.get("OPENAI_BASE_URL")
)

api_base = os.environ.get("API_BASE") or os.environ.get("AZURE_API_BASE")

# Synchronize base_url and api_base if one is populated and the other is not
if base_url and not api_base:
api_base = base_url
elif api_base and not base_url:
base_url = api_base

# Initialize llm_params dictionary

@@ -115,6 +127,7 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]:
"timeout": timeout,
"api_key": api_key,
"base_url": base_url,
"api_base": api_base,
"api_version": api_version,
"presence_penalty": presence_penalty,
"frequency_penalty": frequency_penalty,
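In the environment fallback above, `base_url` is now read from `BASE_URL`, `OPENAI_API_BASE`, or `OPENAI_BASE_URL`, `api_base` from `API_BASE` or `AZURE_API_BASE`, and the two are synchronized when only one is set. A small sketch of driving this purely through the environment; the endpoint and key are placeholders.

```python
import os

# Placeholder values for an OpenAI-compatible endpoint.
os.environ["OPENAI_API_BASE"] = "http://localhost:8000/v1"
os.environ["OPENAI_API_KEY"] = "not-a-real-key"

# With only OPENAI_API_BASE set, base_url is taken from it and api_base is
# then synchronized from base_url, so both reach the LLM parameters.
```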
@@ -1,3 +1,5 @@
import os

from crewai.utilities.file_handler import PickleHandler

@@ -29,3 +31,10 @@ class CrewTrainingHandler(PickleHandler):
data[agent_id] = {train_iteration: new_data}

self.save(data)

def clear(self) -> None:
"""Clear the training data by removing the file or resetting its contents."""
if os.path.exists(self.file_path):
with open(self.file_path, "wb") as file:
# Overwrite with an empty dictionary
self.save({})
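`CrewTrainingHandler` gains a `clear()` method, which the training loop now calls to reset the training files when an iteration fails. A rough lifecycle sketch, assuming the handler's usual import path and a throwaway filename; both are assumptions, not taken from this diff.

```python
from crewai.utilities.training_handler import CrewTrainingHandler

handler = CrewTrainingHandler("throwaway_training_data.pkl")  # placeholder file

data = handler.load() or {}  # empty dict when nothing has been saved yet
data["agent-id"] = {0: {"initial_output": "draft", "human_feedback": "looks good"}}
handler.save(data)

handler.clear()  # resets the file back to an empty dictionary
assert (handler.load() or {}) == {}
```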
@@ -10,13 +10,14 @@ from crewai import Agent, Crew, Task
|
||||
from crewai.agents.cache import CacheHandler
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserException
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
|
||||
from crewai.llm import LLM
|
||||
from crewai.tools import tool
|
||||
from crewai.tools.tool_calling import InstructorToolCalling
|
||||
from crewai.tools.tool_usage import ToolUsage
|
||||
from crewai.tools.tool_usage_events import ToolUsageFinished
|
||||
from crewai.utilities import Printer, RPMController
|
||||
from crewai.utilities import RPMController
|
||||
from crewai.utilities.events import Emitter
|
||||
|
||||
|
||||
@@ -1602,6 +1603,45 @@ def test_agent_with_knowledge_sources():
|
||||
assert "red" in result.raw.lower()
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_agent_with_knowledge_sources_works_with_copy():
|
||||
content = "Brandon's favorite color is red and he likes Mexican food."
|
||||
string_source = StringKnowledgeSource(content=content)
|
||||
|
||||
with patch(
|
||||
"crewai.knowledge.source.base_knowledge_source.BaseKnowledgeSource",
|
||||
autospec=True,
|
||||
) as MockKnowledgeSource:
|
||||
mock_knowledge_source_instance = MockKnowledgeSource.return_value
|
||||
mock_knowledge_source_instance.__class__ = BaseKnowledgeSource
|
||||
mock_knowledge_source_instance.sources = [string_source]
|
||||
|
||||
agent = Agent(
|
||||
role="Information Agent",
|
||||
goal="Provide information based on knowledge sources",
|
||||
backstory="You have access to specific knowledge sources.",
|
||||
llm=LLM(model="gpt-4o-mini"),
|
||||
knowledge_sources=[string_source],
|
||||
)
|
||||
|
||||
with patch(
|
||||
"crewai.knowledge.storage.knowledge_storage.KnowledgeStorage"
|
||||
) as MockKnowledgeStorage:
|
||||
mock_knowledge_storage = MockKnowledgeStorage.return_value
|
||||
agent.knowledge_storage = mock_knowledge_storage
|
||||
|
||||
agent_copy = agent.copy()
|
||||
|
||||
assert agent_copy.role == agent.role
|
||||
assert agent_copy.goal == agent.goal
|
||||
assert agent_copy.backstory == agent.backstory
|
||||
assert agent_copy.knowledge_sources is not None
|
||||
assert len(agent_copy.knowledge_sources) == 1
|
||||
assert isinstance(agent_copy.knowledge_sources[0], StringKnowledgeSource)
|
||||
assert agent_copy.knowledge_sources[0].content == content
|
||||
assert isinstance(agent_copy.llm, LLM)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_litellm_auth_error_handling():
|
||||
"""Test that LiteLLM authentication errors are handled correctly and not retried."""
|
||||
|
||||
@@ -14,6 +14,7 @@ from crewai.agent import Agent
|
||||
from crewai.agents.cache import CacheHandler
|
||||
from crewai.crew import Crew
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
|
||||
from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.process import Process
|
||||
from crewai.project import crew
|
||||
@@ -555,12 +556,12 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
|
||||
_, kwargs = mock_execute_sync.call_args
|
||||
tools = kwargs["tools"]
|
||||
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in tools
|
||||
), "TestTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in tools
|
||||
), "Delegation tool should be present"
|
||||
assert any(isinstance(tool, TestTool) for tool in tools), (
|
||||
"TestTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -619,12 +620,12 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
|
||||
_, kwargs = mock_execute_sync.call_args
|
||||
tools = kwargs["tools"]
|
||||
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in new_ceo.tools
|
||||
), "TestTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in tools
|
||||
), "Delegation tool should be present"
|
||||
assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), (
|
||||
"TestTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -748,17 +749,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
|
||||
used_tools = kwargs["tools"]
|
||||
|
||||
# Confirm AnotherTestTool is present but TestTool is not
|
||||
assert any(
|
||||
isinstance(tool, AnotherTestTool) for tool in used_tools
|
||||
), "AnotherTestTool should be present"
|
||||
assert not any(
|
||||
isinstance(tool, TestTool) for tool in used_tools
|
||||
), "TestTool should not be present among used tools"
|
||||
assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), (
|
||||
"AnotherTestTool should be present"
|
||||
)
|
||||
assert not any(isinstance(tool, TestTool) for tool in used_tools), (
|
||||
"TestTool should not be present among used tools"
|
||||
)
|
||||
|
||||
# Confirm delegation tool(s) are present
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in used_tools
|
||||
), "Delegation tool should be present"
|
||||
assert any("delegate" in tool.name.lower() for tool in used_tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
# Finally, make sure the agent's original tools remain unchanged
|
||||
assert len(researcher_with_delegation.tools) == 1
|
||||
@@ -1466,7 +1467,6 @@ def test_dont_set_agents_step_callback_if_already_set():
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_crew_function_calling_llm():
|
||||
|
||||
from crewai import LLM
|
||||
from crewai.tools import tool
|
||||
|
||||
@@ -1560,9 +1560,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
|
||||
|
||||
# Verify that exactly one tool was used and it was a CodeInterpreterTool
|
||||
assert len(used_tools) == 1, "Should have exactly one tool"
|
||||
assert isinstance(
|
||||
used_tools[0], CodeInterpreterTool
|
||||
), "Tool should be CodeInterpreterTool"
|
||||
assert isinstance(used_tools[0], CodeInterpreterTool), (
|
||||
"Tool should be CodeInterpreterTool"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -3107,9 +3107,9 @@ def test_fetch_inputs():
|
||||
expected_placeholders = {"role_detail", "topic", "field"}
|
||||
actual_placeholders = crew.fetch_inputs()
|
||||
|
||||
assert (
|
||||
actual_placeholders == expected_placeholders
|
||||
), f"Expected {expected_placeholders}, but got {actual_placeholders}"
|
||||
assert actual_placeholders == expected_placeholders, (
|
||||
f"Expected {expected_placeholders}, but got {actual_placeholders}"
|
||||
)
|
||||
|
||||
|
||||
def test_task_tools_preserve_code_execution_tools():
|
||||
@@ -3182,20 +3182,20 @@ def test_task_tools_preserve_code_execution_tools():
|
||||
used_tools = kwargs["tools"]
|
||||
|
||||
# Verify all expected tools are present
|
||||
assert any(
|
||||
isinstance(tool, TestTool) for tool in used_tools
|
||||
), "Task's TestTool should be present"
|
||||
assert any(
|
||||
isinstance(tool, CodeInterpreterTool) for tool in used_tools
|
||||
), "CodeInterpreterTool should be present"
|
||||
assert any(
|
||||
"delegate" in tool.name.lower() for tool in used_tools
|
||||
), "Delegation tool should be present"
|
||||
assert any(isinstance(tool, TestTool) for tool in used_tools), (
|
||||
"Task's TestTool should be present"
|
||||
)
|
||||
assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), (
|
||||
"CodeInterpreterTool should be present"
|
||||
)
|
||||
assert any("delegate" in tool.name.lower() for tool in used_tools), (
|
||||
"Delegation tool should be present"
|
||||
)
|
||||
|
||||
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
|
||||
assert (
|
||||
len(used_tools) == 4
|
||||
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
|
||||
assert len(used_tools) == 4, (
|
||||
"Should have TestTool, CodeInterpreter, and 2 delegation tools"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@@ -3239,9 +3239,9 @@ def test_multimodal_flag_adds_multimodal_tools():
|
||||
used_tools = kwargs["tools"]
|
||||
|
||||
# Check that the multimodal tool was added
|
||||
assert any(
|
||||
isinstance(tool, AddImageTool) for tool in used_tools
|
||||
), "AddImageTool should be present when agent is multimodal"
|
||||
assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
|
||||
"AddImageTool should be present when agent is multimodal"
|
||||
)
|
||||
|
||||
# Verify we have exactly one tool (just the AddImageTool)
|
||||
assert len(used_tools) == 1, "Should only have the AddImageTool"
|
||||
@@ -3467,9 +3467,9 @@ def test_crew_guardrail_feedback_in_context():
|
||||
assert len(execution_contexts) > 1, "Task should have been executed multiple times"
|
||||
|
||||
# Verify that the second execution included the guardrail feedback
|
||||
assert (
|
||||
"Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
|
||||
), "Guardrail feedback should be included in retry context"
|
||||
assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], (
|
||||
"Guardrail feedback should be included in retry context"
|
||||
)
|
||||
|
||||
# Verify final output meets guardrail requirements
|
||||
assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
|
||||
@@ -3494,7 +3494,6 @@ def test_before_kickoff_callback():
|
||||
|
||||
@before_kickoff
|
||||
def modify_inputs(self, inputs):
|
||||
|
||||
self.inputs_modified = True
|
||||
inputs["modified"] = True
|
||||
return inputs
|
||||
@@ -3596,3 +3595,21 @@ def test_before_kickoff_without_inputs():
|
||||
# Verify that the inputs were initialized and modified inside the before_kickoff method
|
||||
assert test_crew_instance.received_inputs is not None
|
||||
assert test_crew_instance.received_inputs.get("modified") is True
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_crew_with_knowledge_sources_works_with_copy():
|
||||
content = "Brandon's favorite color is red and he likes Mexican food."
|
||||
string_source = StringKnowledgeSource(content=content)
|
||||
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
tasks=[Task(description="test", expected_output="test", agent=researcher)],
|
||||
knowledge_sources=[string_source],
|
||||
)
|
||||
|
||||
crew_copy = crew.copy()
|
||||
|
||||
assert crew_copy.knowledge_sources == crew.knowledge_sources
|
||||
assert len(crew_copy.agents) == len(crew.agents)
|
||||
assert len(crew_copy.tasks) == len(crew.tasks)
|
||||
|
||||
@@ -779,6 +779,43 @@ def test_interpolate_only():
|
||||
assert result == no_placeholders
|
||||
|
||||
|
||||
def test_interpolate_only_with_dict_inside_expected_output():
|
||||
"""Test the interpolate_only method for various scenarios including JSON structure preservation."""
|
||||
task = Task(
|
||||
description="Unused in this test",
|
||||
expected_output="Unused in this test: {questions}",
|
||||
)
|
||||
|
||||
json_string = '{"questions": {"main_question": "What is the user\'s name?", "secondary_question": "What is the user\'s age?"}}'
|
||||
result = task.interpolate_only(
|
||||
input_string=json_string,
|
||||
inputs={
|
||||
"questions": {
|
||||
"main_question": "What is the user's name?",
|
||||
"secondary_question": "What is the user's age?",
|
||||
}
|
||||
},
|
||||
)
|
||||
assert '"main_question": "What is the user\'s name?"' in result
|
||||
assert '"secondary_question": "What is the user\'s age?"' in result
|
||||
assert result == json_string
|
||||
|
||||
normal_string = "Hello {name}, welcome to {place}!"
|
||||
result = task.interpolate_only(
|
||||
input_string=normal_string, inputs={"name": "John", "place": "CrewAI"}
|
||||
)
|
||||
assert result == "Hello John, welcome to CrewAI!"
|
||||
|
||||
result = task.interpolate_only(input_string="", inputs={"unused": "value"})
|
||||
assert result == ""
|
||||
|
||||
no_placeholders = "Hello, this is a test"
|
||||
result = task.interpolate_only(
|
||||
input_string=no_placeholders, inputs={"unused": "value"}
|
||||
)
|
||||
assert result == no_placeholders
|
||||
|
||||
|
||||
def test_task_output_str_with_pydantic():
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
|
||||
@@ -966,3 +1003,283 @@ def test_task_execution_times():
    assert task.start_time is not None
    assert task.end_time is not None
    assert task.execution_duration == (task.end_time - task.start_time).total_seconds()


def test_interpolate_with_list_of_strings():
    task = Task(
        description="Test list interpolation",
        expected_output="List: {items}",
    )

    # Test simple list of strings
    input_str = "Available items: {items}"
    inputs = {"items": ["apple", "banana", "cherry"]}
    result = task.interpolate_only(input_str, inputs)
    assert result == f"Available items: {inputs['items']}"

    # Test empty list
    empty_list_input = {"items": []}
    result = task.interpolate_only(input_str, empty_list_input)
    assert result == "Available items: []"


def test_interpolate_with_list_of_dicts():
    task = Task(
        description="Test list of dicts interpolation",
        expected_output="People: {people}",
    )

    input_data = {
        "people": [
            {"name": "Alice", "age": 30, "skills": ["Python", "AI"]},
            {"name": "Bob", "age": 25, "skills": ["Java", "Cloud"]},
        ]
    }
    result = task.interpolate_only("{people}", input_data)

    parsed_result = eval(result)
    assert isinstance(parsed_result, list)
    assert len(parsed_result) == 2
    assert parsed_result[0]["name"] == "Alice"
    assert parsed_result[0]["age"] == 30
    assert parsed_result[0]["skills"] == ["Python", "AI"]
    assert parsed_result[1]["name"] == "Bob"
    assert parsed_result[1]["age"] == 25
    assert parsed_result[1]["skills"] == ["Java", "Cloud"]


def test_interpolate_with_nested_structures():
    task = Task(
        description="Test nested structures",
        expected_output="Company: {company}",
    )

    input_data = {
        "company": {
            "name": "TechCorp",
            "departments": [
                {
                    "name": "Engineering",
                    "employees": 50,
                    "tools": ["Git", "Docker", "Kubernetes"],
                },
                {"name": "Sales", "employees": 20, "regions": {"north": 5, "south": 3}},
            ],
        }
    }
    result = task.interpolate_only("{company}", input_data)
    parsed = eval(result)

    assert parsed["name"] == "TechCorp"
    assert len(parsed["departments"]) == 2
    assert parsed["departments"][0]["tools"] == ["Git", "Docker", "Kubernetes"]
    assert parsed["departments"][1]["regions"]["north"] == 5


def test_interpolate_with_special_characters():
    task = Task(
        description="Test special characters in dicts",
        expected_output="Data: {special_data}",
    )

    input_data = {
        "special_data": {
            "quotes": """This has "double" and 'single' quotes""",
            "unicode": "文字化けテスト",
            "symbols": "!@#$%^&*()",
            "empty": "",
        }
    }
    result = task.interpolate_only("{special_data}", input_data)
    parsed = eval(result)

    assert parsed["quotes"] == """This has "double" and 'single' quotes"""
    assert parsed["unicode"] == "文字化けテスト"
    assert parsed["symbols"] == "!@#$%^&*()"
    assert parsed["empty"] == ""


def test_interpolate_mixed_types():
    task = Task(
        description="Test mixed type interpolation",
        expected_output="Mixed: {data}",
    )

    input_data = {
        "data": {
            "name": "Test Dataset",
            "samples": 1000,
            "features": ["age", "income", "location"],
            "metadata": {
                "source": "public",
                "validated": True,
                "tags": ["demo", "test", "temp"],
            },
        }
    }
    result = task.interpolate_only("{data}", input_data)
    parsed = eval(result)

    assert parsed["name"] == "Test Dataset"
    assert parsed["samples"] == 1000
    assert parsed["metadata"]["tags"] == ["demo", "test", "temp"]


def test_interpolate_complex_combination():
    task = Task(
        description="Test complex combination",
        expected_output="Report: {report}",
    )

    input_data = {
        "report": [
            {
                "month": "January",
                "metrics": {"sales": 15000, "expenses": 8000, "profit": 7000},
                "top_products": ["Product A", "Product B"],
            },
            {
                "month": "February",
                "metrics": {"sales": 18000, "expenses": 8500, "profit": 9500},
                "top_products": ["Product C", "Product D"],
            },
        ]
    }
    result = task.interpolate_only("{report}", input_data)
    parsed = eval(result)

    assert len(parsed) == 2
    assert parsed[0]["month"] == "January"
    assert parsed[1]["metrics"]["profit"] == 9500
    assert "Product D" in parsed[1]["top_products"]

def test_interpolate_invalid_type_validation():
    task = Task(
        description="Test invalid type validation",
        expected_output="Should never reach here",
    )

    # Test with invalid top-level type
    with pytest.raises(ValueError) as excinfo:
        task.interpolate_only("{data}", {"data": set()})  # type: ignore we are purposely testing this failure
    assert "Unsupported type set" in str(excinfo.value)

    # Test with invalid nested type
    invalid_nested = {
        "profile": {
            "name": "John",
            "age": 30,
            "tags": {"a", "b", "c"},  # Set is invalid
        }
    }
    with pytest.raises(ValueError) as excinfo:
        task.interpolate_only("{data}", {"data": invalid_nested})
    assert "Unsupported type set" in str(excinfo.value)


def test_interpolate_custom_object_validation():
    task = Task(
        description="Test custom object rejection",
        expected_output="Should never reach here",
    )

    class CustomObject:
        def __init__(self, value):
            self.value = value

        def __str__(self):
            return str(self.value)

    # Test with custom object at top level
    with pytest.raises(ValueError) as excinfo:
        task.interpolate_only("{obj}", {"obj": CustomObject(5)})  # type: ignore we are purposely testing this failure
    assert "Unsupported type CustomObject" in str(excinfo.value)

    # Test with nested custom object in dictionary
    with pytest.raises(ValueError) as excinfo:
        task.interpolate_only(
            "{data}", {"data": {"valid": 1, "invalid": CustomObject(5)}}
        )
    assert "Unsupported type CustomObject" in str(excinfo.value)

    # Test with nested custom object in list
    with pytest.raises(ValueError) as excinfo:
        task.interpolate_only("{data}", {"data": [1, "valid", CustomObject(5)]})
    assert "Unsupported type CustomObject" in str(excinfo.value)

    # Test with deeply nested custom object
    with pytest.raises(ValueError) as excinfo:
        task.interpolate_only(
            "{data}", {"data": {"level1": {"level2": [{"level3": CustomObject(5)}]}}}
        )
    assert "Unsupported type CustomObject" in str(excinfo.value)


def test_interpolate_valid_complex_types():
    task = Task(
        description="Test valid complex types",
        expected_output="Validation should pass",
    )

    # Valid complex structure
    valid_data = {
        "name": "Valid Dataset",
        "stats": {
            "count": 1000,
            "distribution": [0.2, 0.3, 0.5],
            "features": ["age", "income"],
            "nested": {"deep": [1, 2, 3], "deeper": {"a": 1, "b": 2.5}},
        },
    }

    # Should not raise any errors
    result = task.interpolate_only("{data}", {"data": valid_data})
    parsed = eval(result)
    assert parsed["name"] == "Valid Dataset"
    assert parsed["stats"]["nested"]["deeper"]["b"] == 2.5


def test_interpolate_edge_cases():
    task = Task(
        description="Test edge cases",
        expected_output="Edge case handling",
    )

    # Test empty dict and list
    assert task.interpolate_only("{}", {"data": {}}) == "{}"
    assert task.interpolate_only("[]", {"data": []}) == "[]"

    # Test numeric types
    assert task.interpolate_only("{num}", {"num": 42}) == "42"
    assert task.interpolate_only("{num}", {"num": 3.14}) == "3.14"

    # Test boolean values (valid JSON types)
    assert task.interpolate_only("{flag}", {"flag": True}) == "True"
    assert task.interpolate_only("{flag}", {"flag": False}) == "False"


def test_interpolate_valid_types():
    task = Task(
        description="Test valid types including null and boolean",
        expected_output="Should pass validation",
    )

    # Test with boolean and null values (valid JSON types)
    valid_data = {
        "name": "Test",
        "active": True,
        "deleted": False,
        "optional": None,
        "nested": {"flag": True, "empty": None},
    }

    result = task.interpolate_only("{data}", {"data": valid_data})
    parsed = eval(result)

    assert parsed["active"] is True
    assert parsed["deleted"] is False
    assert parsed["optional"] is None
    assert parsed["nested"]["flag"] is True
    assert parsed["nested"]["empty"] is None

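The tests above pin down two behaviors of `Task.interpolate_only`: dict and list inputs round-trip through their Python string form, and any value outside the JSON-like set (str, int, float, bool, None, dict, list) is rejected with an "Unsupported type <TypeName>" error. The sketch below only illustrates that validation contract; the standalone helper name `validate_interpolation_value` and its structure are assumptions, not the crewAI implementation.

# Illustrative sketch only -- not the crewAI implementation.
# Recursively checks that a value uses only JSON-like types, mirroring the
# "Unsupported type set" / "Unsupported type CustomObject" errors asserted above.
from typing import Any


def validate_interpolation_value(value: Any) -> None:
    """Raise ValueError for any type the interpolation tests treat as unsupported."""
    if value is None or isinstance(value, (str, int, float, bool)):
        return
    if isinstance(value, dict):
        for item in value.values():
            validate_interpolation_value(item)
        return
    if isinstance(value, list):
        for item in value:
            validate_interpolation_value(item)
        return
    raise ValueError(f"Unsupported type {type(value).__name__}")


# Example: a set nested anywhere in the inputs is rejected, matching
# test_interpolate_invalid_type_validation.
# validate_interpolation_value({"profile": {"tags": {"a", "b"}}})  # ValueError: Unsupported type set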
@@ -48,9 +48,9 @@ def test_evaluate_training_data(converter_mock):
        mock.call(
            llm=original_agent.llm,
            text="Assess the quality of the training data based on the llm output, human feedback , and llm "
            "output improved result.\n\nInitial Output:\nInitial output 1\n\nHuman Feedback:\nHuman feedback "
            "1\n\nImproved Output:\nImproved output 1\n\nInitial Output:\nInitial output 2\n\nHuman "
            "Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\nPlease provide:\n- Provide "
            "output improved result.\n\nIteration: data1\nInitial Output:\nInitial output 1\n\nHuman Feedback:\nHuman feedback "
            "1\n\nImproved Output:\nImproved output 1\n\n------------------------------------------------\n\nIteration: data2\nInitial Output:\nInitial output 2\n\nHuman "
            "Feedback:\nHuman feedback 2\n\nImproved Output:\nImproved output 2\n\n------------------------------------------------\n\nPlease provide:\n- Provide "
            "a list of clear, actionable instructions derived from the Human Feedbacks to enhance the Agent's "
            "performance. Analyze the differences between Initial Outputs and Improved Outputs to generate specific "
            "action items for future tasks. Ensure all key and specificpoints from the human feedback are "
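The updated expectation in this hunk shows the training-data prompt now labeling each iteration ("Iteration: data1", "Iteration: data2") and separating entries with a dashed divider. The sketch below assembles a prompt of that shape; the `build_training_prompt` helper, the `training_data` layout keyed by iteration id, and the divider width are assumptions inferred from the expected string, not taken from the crewAI source.

# Illustrative sketch only: builds a prompt in the shape the updated test expects.
# The training_data layout (keyed by iteration id, with initial_output /
# human_feedback / improved_output fields) is an assumption inferred from the
# "Iteration: data1" labels in the expected string.
SEPARATOR = "-" * 48  # the test pins the exact divider; the width here is illustrative


def build_training_prompt(training_data: dict) -> str:
    header = (
        "Assess the quality of the training data based on the llm output, "
        "human feedback , and llm output improved result.\n\n"
    )
    sections = []
    for iteration_id, entry in training_data.items():
        sections.append(
            f"Iteration: {iteration_id}\n"
            f"Initial Output:\n{entry['initial_output']}\n\n"
            f"Human Feedback:\n{entry['human_feedback']}\n\n"
            f"Improved Output:\n{entry['improved_output']}\n\n"
            f"{SEPARATOR}\n\n"
        )
    return header + "".join(sections)


# Example shape matching the test data:
# build_training_prompt({
#     "data1": {"initial_output": "Initial output 1",
#               "human_feedback": "Human feedback 1",
#               "improved_output": "Improved output 1"},
# })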