mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 08:38:30 +00:00

bug fixing
@@ -13,7 +13,7 @@ from langchain_core.utils.input import get_color_mapping
 from pydantic import InstanceOf

 from crewai.agents.tools_handler import ToolsHandler
-from crewai.tools.tool_usage import ToolUsage
+from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N

@@ -111,13 +111,19 @@ class CrewAgentExecutor(AgentExecutor):
             callbacks=run_manager.get_child() if run_manager else None,
             **inputs,
         )

         if self._should_force_answer():
-            if isinstance(output, AgentAction) or isinstance(output, AgentFinish):
+            if isinstance(output, AgentFinish):
+                yield output
+                return
+
+            if isinstance(output, AgentAction):
                 output = output
             else:
                 raise ValueError(
                     f"Unexpected output type from agent: {type(output)}"
                 )
+
             yield AgentStep(
                 action=output, observation=self._i18n.errors("force_final_answer")
             )
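Pulled out of the executor, the reordered branches are easier to follow. Below is a minimal, hedged sketch of the same control flow; `force_answer` and `FORCE_FINAL_ANSWER` are stand-ins for the executor method and the `force_final_answer` i18n slice, not crewAI API.

```python
# Minimal sketch of the fixed force-answer flow (stand-ins, not crewAI API).
from typing import Union

from langchain_core.agents import AgentAction, AgentFinish, AgentStep

FORCE_FINAL_ANSWER = "Give your best final answer now."  # stand-in i18n slice


def force_answer(output: Union[AgentAction, AgentFinish]):
    # An AgentFinish is a final answer already: yield it and stop.
    if isinstance(output, AgentFinish):
        yield output
        return

    # An AgentAction gets its observation replaced with the forcing message,
    # so the next LLM call is pushed to produce a final answer.
    if isinstance(output, AgentAction):
        yield AgentStep(action=output, observation=FORCE_FINAL_ANSWER)
    else:
        raise ValueError(f"Unexpected output type from agent: {type(output)}")


# Usage: a finish passes through; an action becomes a forced step.
finish = AgentFinish(return_values={"output": "42"}, log="")
action = AgentAction(tool="search", tool_input="q", log="")
print(next(force_answer(finish)))
print(next(force_answer(action)))
```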
@@ -192,6 +198,9 @@ class CrewAgentExecutor(AgentExecutor):
             )
             tool_calling = tool_usage.parse(agent_action.log)

-            if tool_calling.tool_name.lower().strip() in [
-                name.lower().strip() for name in name_to_tool_map
-            ]:
+            if isinstance(tool_calling, ToolUsageErrorException):
+                observation = tool_calling.message
+            else:
+                if tool_calling.tool_name.lower().strip() in [
+                    name.lower().strip() for name in name_to_tool_map
+                ]:
@@ -18,7 +18,6 @@ class ToolsHandler:

     def on_tool_use(self, calling: ToolCalling, output: str) -> Any:
         """Run when tool ends running."""
-        print(f"Tool {calling.tool_name} has been used.")
         self.last_used_tool = calling
         if calling.tool_name != CacheTools().name:
             self.cache.add(
@@ -114,7 +114,7 @@ class Task(BaseModel):
             for task in self.context:
                 if task.async_execution:
                     task.thread.join()
-                context.append(task.output.result)
+                context.append(task.output.raw_output)
             context = "\n".join(context)

         tools = tools or self.tools
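The one-word change above matters because the joined context is built from each prior task's output. A minimal sketch of that aggregation, assuming only that completed tasks expose a `raw_output` string (the `TaskOutput` stand-in here is hypothetical, not crewai's class):

```python
# Sketch of context aggregation after the fix; TaskOutput is a stand-in.
from dataclasses import dataclass


@dataclass
class TaskOutput:
    raw_output: str  # the field the fixed code reads


completed = [TaskOutput("Paris is the capital of France."),
             TaskOutput("Its population is about 2.1 million.")]
context = "\n".join(t.raw_output for t in completed)
print(context)
```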
@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, Optional

 from pydantic import BaseModel as PydanticBaseModel
 from pydantic import Field as PydanticField
@@ -7,7 +7,7 @@ from pydantic.v1 import BaseModel, Field


 class ToolCalling(BaseModel):
     tool_name: str = Field(..., description="The name of the tool to be called.")
-    arguments: Dict[str, Any] = Field(
+    arguments: Optional[Dict[str, Any]] = Field(
         ..., description="A dictionary of arguments to be passed to the tool."
     )
@@ -16,6 +16,6 @@ class InstructorToolCalling(PydanticBaseModel):
     tool_name: str = PydanticField(
         ..., description="The name of the tool to be called."
     )
-    arguments: Dict = PydanticField(
+    arguments: Optional[Dict[str, Any]] = PydanticField(
         ..., description="A dictionary of arguments to be passed to the tool."
     )
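The `Optional` annotation is what allows a tool call with no arguments: under `pydantic.v1`, a required `Dict[str, Any]` field rejects `None` with a validation error. A minimal sketch mirroring the `ToolCalling` model above:

```python
# Sketch: Optional lets a no-argument tool call validate instead of failing.
from typing import Any, Dict, Optional

from pydantic.v1 import BaseModel, Field, ValidationError


class StrictCalling(BaseModel):  # the pre-fix shape
    tool_name: str = Field(...)
    arguments: Dict[str, Any] = Field(...)


class RelaxedCalling(BaseModel):  # the post-fix shape
    tool_name: str = Field(...)
    arguments: Optional[Dict[str, Any]] = Field(...)


try:
    StrictCalling(tool_name="list_files", arguments=None)
except ValidationError as e:
    print("strict model rejects None:", e.errors()[0]["msg"])

print(RelaxedCalling(tool_name="list_files", arguments=None))  # validates fine
```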
@@ -113,7 +113,10 @@ class ToolUsage:

         if not result:
             try:
-                result = tool._run(**calling.arguments)
+                if calling.arguments:
+                    result = tool._run(**calling.arguments)
+                else:
+                    result = tool._run()
             except Exception as e:
                 self._run_attempts += 1
                 if self._run_attempts > self._max_parsing_attempts:
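This guard pairs with the `Optional[Dict[str, Any]]` change to `ToolCalling`: once `arguments` can be `None`, unpacking it with `**` would raise `TypeError: argument after ** must be a mapping`. A standalone sketch, with a hypothetical `echo` function standing in for `tool._run`:

```python
# Sketch of the arguments guard; echo stands in for tool._run.
from typing import Any, Dict, Optional


def echo(message: str = "nothing") -> str:
    return f"echo: {message}"


def run_tool(arguments: Optional[Dict[str, Any]]) -> str:
    if arguments:
        return echo(**arguments)  # only unpack when there is a real mapping
    return echo()  # None (or {}) falls back to a bare call


print(run_tool({"message": "hi"}))  # echo: hi
print(run_tool(None))               # echo: nothing
```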
@@ -206,6 +209,7 @@ class ToolUsage:
                 ),
             )
             calling = instructor.to_pydantic()
+
         else:
             parser = ToolOutputParser(pydantic_object=ToolCalling)
             prompt = PromptTemplate(
@@ -234,7 +238,7 @@ class ToolUsage:
             self._telemetry.tool_usage_error(llm=self.llm)
             self._printer.print(content=f"\n\n{e}\n", color="red")
             return ToolUsageErrorException(
-                f'{self._i18n.errors("tool_usage_error")}.\n{self._i18n.slice("format").format(tool_names=self.tools_names)}'
+                f'{self._i18n.errors("tool_usage_error")}\n{self._i18n.slice("format").format(tool_names=self.tools_names)}'
             )
         return self._tool_calling(tool_string)

@@ -15,12 +15,7 @@ class Prompts(BaseModel):

     def task_execution_with_memory(self) -> BasePromptTemplate:
         """Generate a prompt for task execution with memory components."""
-        slices = ["role_playing"]
-        if len(self.tools) > 0:
-            slices.append("tools")
-        else:
-            slices.append("no_tools")
-        slices.extend(["memory", "task"])
+        slices = ["role_playing", "tools", "memory", "task"]
         return self._build_prompt(slices)

     def task_execution_without_tools(self) -> BasePromptTemplate:
@@ -29,12 +24,7 @@ class Prompts(BaseModel):

     def task_execution(self) -> BasePromptTemplate:
         """Generate a standard prompt for task execution."""
-        slices = ["role_playing"]
-        if len(self.tools) > 0:
-            slices.append("tools")
-        else:
-            slices.append("no_tools")
-        slices.append("task")
+        slices = ["role_playing", "tools", "task"]
         return self._build_prompt(slices)

     def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
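Both prompt builders now pass a fixed slice list instead of branching on `self.tools`. A simplified sketch of the assembly, with a hypothetical `SLICES` table standing in for the i18n prompt slices and plain joining in place of `_build_prompt`:

```python
# Simplified sketch of fixed-slice prompt assembly; SLICES is hypothetical.
SLICES = {
    "role_playing": "You are {role}, an expert at what you do.",
    "tools": "You can use the following tools: {tools}.",
    "memory": "Useful context from memory: {memory}.",
    "task": "Current task: {task}.",
}


def build_prompt(components: list[str]) -> str:
    return "\n".join(SLICES[c] for c in components)


print(build_prompt(["role_playing", "tools", "memory", "task"]))  # with memory
print(build_prompt(["role_playing", "tools", "task"]))            # standard
```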