quick bug fixes

João Moura
2024-09-18 03:22:56 -03:00
parent e77442cf34
commit 2787c9b0ef
6 changed files with 27 additions and 21 deletions


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "crewai"
-version = "0.60.0"
+version = "0.60.4"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 authors = ["Joao Moura <joao@crewai.com>"]
 readme = "README.md"


@@ -118,11 +118,15 @@ class Agent(BaseAgent):
     @model_validator(mode="after")
     def post_init_setup(self):
         self.agent_ops_agent_name = self.role
-        self.llm = self.llm.model_name if hasattr(self.llm, "model_name") else self.llm
+        self.llm = (
+            getattr(self.llm, "model_name", None)
+            or getattr(self.llm, "deployment_name", None)
+            or self.llm
+        )
         self.function_calling_llm = (
-            self.function_calling_llm.model_name
-            if hasattr(self.function_calling_llm, "model_name")
-            else self.function_calling_llm
+            getattr(self.function_calling_llm, "model_name", None)
+            or getattr(self.function_calling_llm, "deployment_name", None)
+            or self.function_calling_llm
         )
         if not self.agent_executor:
             self._setup_agent_executor()
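
The `hasattr`/ternary is replaced with a `getattr` chain so that an LLM object exposing `deployment_name` (common on Azure-style wrappers) also collapses to a plain model string. A minimal sketch of the fallback pattern in isolation; the helper name and the stand-in classes below are illustrative, not part of crewai:

```python
# Minimal sketch of the fallback pattern used in post_init_setup above.
# The helper and the stand-in LLM classes are illustrative, not crewai code.
from typing import Any


def resolve_model_name(llm: Any) -> Any:
    """Collapse an LLM object to a model-name string when possible.

    Falls back from `model_name` to `deployment_name` (Azure-style wrappers)
    and finally returns the value unchanged (e.g. an already-plain string).
    """
    return (
        getattr(llm, "model_name", None)
        or getattr(llm, "deployment_name", None)
        or llm
    )


class _OpenAIStyle:
    model_name = "gpt-4o"


class _AzureStyle:
    deployment_name = "my-gpt4o-deployment"


print(resolve_model_name(_OpenAIStyle()))  # -> "gpt-4o"
print(resolve_model_name(_AzureStyle()))   # -> "my-gpt4o-deployment"
print(resolve_model_name("gpt-4o-mini"))   # -> "gpt-4o-mini" (already a string)
```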


@@ -200,10 +200,9 @@ class Crew(BaseModel):
             self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         self.function_calling_llm = (
-            self.function_calling_llm.model_name
-            if self.function_calling_llm is not None
-            and hasattr(self.function_calling_llm, "model_name")
-            else self.function_calling_llm
+            getattr(self.function_calling_llm, "model_name", None)
+            or getattr(self.function_calling_llm, "deployment_name", None)
+            or self.function_calling_llm
         )
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
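
The same `getattr` chain also subsumes the old explicit `is not None` guard: `getattr(None, "model_name", None)` returns the default instead of raising, so a `None` `function_calling_llm` passes through unchanged. A quick plain-Python check, nothing crewai-specific:

```python
# Plain-Python check that the getattr chain is None-safe: with a None input
# every getattr returns the default and the final `or` hands back None.
function_calling_llm = None

resolved = (
    getattr(function_calling_llm, "model_name", None)
    or getattr(function_calling_llm, "deployment_name", None)
    or function_calling_llm
)

assert resolved is None  # no AttributeError; None passes through unchanged
```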
@@ -592,9 +591,9 @@ class Crew(BaseModel):
             manager.tools = self.manager_agent.get_delegation_tools(self.agents)
         else:
             self.manager_llm = (
-                self.manager_llm.model_name
-                if hasattr(self.manager_llm, "model_name")
-                else self.manager_llm
+                getattr(self.manager_llm, "model_name", None)
+                or getattr(self.manager_llm, "deployment_name", None)
+                or self.manager_llm
             )
             manager = Agent(
                 role=i18n.retrieve("hierarchical_manager_agent", "role"),
@@ -605,6 +604,7 @@ class Crew(BaseModel):
                 verbose=self.verbose,
             )
             self.manager_agent = manager
+        manager.crew = self

     def _execute_tasks(
         self,
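
With `manager.crew = self` the manager agent created for the hierarchical process gets a back-reference to its crew, matching what regular agents receive, so downstream code reading the agent's crew works for the manager as well. A hedged usage sketch of the code path being patched; the roles, task text, and model name are placeholders, and running it requires API credentials:

```python
# Hedged usage sketch: a hierarchical crew with no manager_agent supplied,
# so Crew builds its own manager Agent (the code path patched above).
from crewai import Agent, Crew, Process, Task

researcher = Agent(
    role="Researcher",
    goal="Collect facts about a topic",
    backstory="Diligent and thorough.",
)

task = Task(
    description="Summarize the current state of open-source LLM tooling.",
    expected_output="A short bullet-point summary.",
    agent=researcher,
)

crew = Crew(
    agents=[researcher],
    tasks=[task],
    process=Process.hierarchical,
    manager_llm="gpt-4o",  # the crew creates the manager agent internally
)

# result = crew.kickoff()  # needs API credentials, so left commented out
```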
@@ -936,10 +936,10 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: str,
+        openai_model_name: Optional[str] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations."""
+        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
         self._test_execution_span = self._telemetry.test_execution_span(
             self, n_iterations, inputs, openai_model_name
         )
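
`openai_model_name` becomes optional with a `None` default, so `test()` can now be called with just an iteration count. A hedged call sketch, assuming `crew` is an already-constructed Crew instance:

```python
# Hedged call sketch: with openai_model_name now Optional, all of these are
# valid signatures. Assumes `crew` exists; running them needs API access.
crew.test(n_iterations=2)                                   # default evaluation model
crew.test(n_iterations=2, openai_model_name="gpt-4o")       # explicit model
crew.test(n_iterations=2, inputs={"topic": "LLM tooling"})  # with task inputs
```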


@@ -35,7 +35,7 @@ class TaskOutput(BaseModel):
         return self

     @property
-    def json(self) -> str:
+    def json(self) -> Optional[str]:
         if self.output_format != OutputFormat.JSON:
             raise ValueError(
                 """


@@ -17,7 +17,7 @@ if os.environ.get("AGENTOPS_API_KEY"):
     except ImportError:
         pass

-OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o"]
+OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini"]


 class ToolUsageErrorException(Exception):
@@ -71,8 +71,10 @@ class ToolUsage:
         self.function_calling_llm = function_calling_llm

         # Set the maximum parsing attempts for bigger models
-        if self._is_gpt(self.function_calling_llm) and "4" in self.function_calling_llm:
-            if self.function_calling_llm in OPENAI_BIGGER_MODELS:
+        if (
+            self._is_gpt(self.function_calling_llm)
+            and self.function_calling_llm in OPENAI_BIGGER_MODELS
+        ):
             self._max_parsing_attempts = 2
             self._remember_format_after_usages = 4
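
The old substring check `"4" in self.function_calling_llm` over-matches: it is true for any model string that merely contains the character "4" (dated snapshot names, "gpt-4o-mini", and so on). The rewritten condition tests exact membership in the expanded `OPENAI_BIGGER_MODELS` list instead. A small comparison in plain Python, reusing the constant's new value:

```python
# Small comparison of the old substring check vs. the new membership check.
# OPENAI_BIGGER_MODELS mirrors the updated constant above.
OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini"]

for model in ["gpt-4", "o1-preview", "gpt-3.5-turbo-0314", "gpt-4o-mini"]:
    old_check = "4" in model                   # substring: also true for "...0314"
    new_check = model in OPENAI_BIGGER_MODELS  # exact membership in the list
    print(f"{model:20} old={old_check!s:5} new={new_check}")
```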


@@ -52,7 +52,7 @@ class RPMController(BaseModel):
             self._timer = None

     def _wait_for_next_minute(self):
-        time.sleep(1)
+        time.sleep(60)
         self._current_rpm = 0

     def _reset_request_count(self):
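
The helper is named `_wait_for_next_minute` but previously slept only one second, so the per-minute counter was reset almost immediately and the RPM cap was barely enforced; sleeping 60 seconds makes the window match the name. A minimal, simplified sketch of the same idea in isolation; the real `RPMController` additionally runs a background timer and logs when it waits:

```python
# Minimal, simplified sketch of a requests-per-minute throttle; the real
# RPMController also uses a background timer thread and a logger.
import time


class SimpleRPMLimiter:
    def __init__(self, max_rpm: int):
        self.max_rpm = max_rpm
        self._current_rpm = 0

    def check_or_wait(self) -> None:
        if self._current_rpm >= self.max_rpm:
            time.sleep(60)         # wait out the rest of the minute window
            self._current_rpm = 0  # then start a fresh window
        self._current_rpm += 1
```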