Merge branch 'main' into feature/procedure_v2

Brandon Hancock
2024-07-29 14:27:28 -04:00
4 changed files with 22 additions and 43 deletions


@@ -55,6 +55,9 @@ class Agent(BaseAgent):
        tools: Tools at the agent's disposal
        step_callback: Callback to be executed after each step of the agent execution.
        callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
    """

    _times_executed: int = PrivateAttr(default=0)
    max_execution_time: Optional[int] = Field(
        default=None,
        description="Maximum execution time for an agent to execute a task",
@@ -186,6 +189,20 @@ class Agent(BaseAgent):
        else:
            task_prompt = self._use_trained_data(task_prompt=task_prompt)

        try:
            result = self.agent_executor.invoke(
                {
                    "input": task_prompt,
                    "tool_names": self.agent_executor.tools_names,
                    "tools": self.agent_executor.tools_description,
                }
            )["output"]
        except Exception as e:
            self._times_executed += 1
            if self._times_executed > self.max_retry_limit:
                raise e
            result = self.execute_task(task, context, tools)

        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
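
The new try/except block is a bounded retry: each failed invoke increments a private counter, and execute_task calls itself again until max_retry_limit is exceeded, at which point the original exception is re-raised. A minimal standalone sketch of the same pattern (the class and function below are illustrative; only the counter and limit names mirror the diff):

class RetryingRunner:
    def __init__(self, max_retry_limit: int = 2) -> None:
        self.max_retry_limit = max_retry_limit
        self._times_executed = 0

    def run(self, fn):
        try:
            return fn()
        except Exception:
            self._times_executed += 1
            if self._times_executed > self.max_retry_limit:
                # Give up: re-raise the failure once the limit is exceeded.
                raise
            # Retry by calling ourselves again, like Agent.execute_task does.
            return self.run(fn)

attempts = []
def flaky():
    attempts.append(1)
    if len(attempts) < 2:
        raise RuntimeError("transient failure")
    return "ok"

print(RetryingRunner().run(flaky))  # prints "ok" after one retry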


@@ -19,7 +19,7 @@ class ShortTermMemory(Memory):
        super().__init__(storage)

    def save(self, item: ShortTermMemoryItem) -> None:
        super().save(item.data, item.metadata, item.agent)
        super().save(value=item.data, metadata=item.metadata, agent=item.agent)

    def search(self, query: str, score_threshold: float = 0.35):
        return self.storage.search(query=query, score_threshold=score_threshold)  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have these parameters
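
The one-line change in save switches from positional to keyword arguments when delegating to the parent class, so each value stays bound to the intended parameter even if the parent signature is reordered or extended. A toy illustration (these classes are made up, not the crewAI Memory hierarchy):

class BaseStore:
    def save(self, value, metadata=None, agent=None):
        print(f"saving {value!r} (agent={agent}, metadata={metadata})")

class ShortTermStore(BaseStore):
    def save(self, data, meta, agent):
        # Keyword arguments survive signature changes in BaseStore.save;
        # positional arguments would silently shift if a parameter were inserted.
        super().save(value=data, metadata=meta, agent=agent)

ShortTermStore().save("hi there", {"source": "test"}, "researcher")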


@@ -356,6 +356,9 @@ class Task(BaseModel):
        return OutputFormat.RAW

    def _save_file(self, result: Any) -> None:
        if self.output_file is None:
            raise ValueError("output_file is not set.")

        directory = os.path.dirname(self.output_file)  # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"
        if directory and not os.path.exists(directory):
@@ -364,6 +367,7 @@ class Task(BaseModel):
        with open(self.output_file, "w", encoding="utf-8") as file:
            if isinstance(result, dict):
                import json

                json.dump(result, file, ensure_ascii=False, indent=2)
            else:
                file.write(str(result))
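
With both hunks applied, _save_file raises early when output_file is unset, ensures the parent directory exists, and writes dict results as UTF-8 JSON (ensure_ascii=False, indent=2), falling back to str() for everything else. A standalone sketch of that flow, assuming the elided context line creates the directory with os.makedirs (the helper name and sample path below are made up):

import json
import os
from typing import Any, Optional

def save_result(output_file: Optional[str], result: Any) -> None:
    if output_file is None:
        raise ValueError("output_file is not set.")
    directory = os.path.dirname(output_file)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)  # assumed from the elided line in the diff
    with open(output_file, "w", encoding="utf-8") as file:
        if isinstance(result, dict):
            # ensure_ascii=False keeps non-ASCII characters readable in the output.
            json.dump(result, file, ensure_ascii=False, indent=2)
        else:
            file.write(str(result))

save_result("output/result.json", {"greeting": "hello"})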


@@ -8,7 +8,6 @@ from unittest.mock import MagicMock, patch
import pydantic_core
import pytest
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
@@ -572,47 +571,6 @@ def test_api_calls_throttling(capsys):
        moveon.assert_called()


# This test is not consistent: on CI, prompt_tokens comes back as 0 while it is 314 locally
# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 0, 'successful_requests': 2, 'total_tokens': 34}} CI OUTPUT
# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 314, 'successful_requests': 2, 'total_tokens': 348}}
# The issue might be related to the calculate_usage_metrics function
# @pytest.mark.vcr(filter_headers=["authorization"])
# def test_crew_full_output():
#     agent = Agent(
#         role="test role",
#         goal="test goal",
#         backstory="test backstory",
#         allow_delegation=False,
#         verbose=True,
#     )

#     task1 = Task(
#         description="just say hi!",
#         expected_output="your greeting",
#         agent=agent,
#     )

#     task2 = Task(
#         description="just say hello!",
#         expected_output="your greeting",
#         agent=agent,
#     )

#     crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)

#     result = crew.kickoff()

#     assert result == {
#         "final_output": "Hello!",
#         "tasks_outputs": [task1.output, task2.output],
#         "usage_metrics": {
#             "total_tokens": 348,
#             "prompt_tokens": 314,
#             "completion_tokens": 34,
#             "successful_requests": 2,
#         },
#     }
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_kickoff_usage_metrics():
    inputs = [