Compare commits

8 Commits

| Author | SHA1 | Message | Date |
|---|---|---|---|
| Lucas Gomide | a2224bbe18 | Merge branch 'main' into bugfix-python-3-10 | 2025-04-10 14:11:16 -03:00 |
| Lorenze Jay | d96543d314 | Merge branch 'main' into bugfix-python-3-10 | 2025-04-10 09:47:12 -07:00 |
| Lucas Gomide | 52e10d6c84 | Merge branch 'main' into bugfix-python-3-10 | 2025-04-10 09:27:37 -03:00 |
| Lorenze Jay | f18a112cd7 | Merge branch 'main' into bugfix-python-3-10 | 2025-04-09 08:35:27 -07:00 |
| Lucas Gomide | 40dcdb43d6 | Merge branch 'main' into bugfix-python-3-10 | 2025-04-09 11:58:16 -03:00 |
| Lucas Gomide | 1167fbdd8c | chore: rename external_memory file test | 2025-04-09 11:19:07 -03:00 |
| Lucas Gomide | d200d00bb5 | refactor: remove explicit Self import from typing ("Python 3.10+ natively supports Self type annotation without explicit imports") | 2025-04-09 11:13:01 -03:00 |
| Lucas Gomide | bf55dde358 | ci(workflows): add Python version matrix (3.10-3.12) for tests | 2025-04-09 11:13:01 -03:00 |
12 changed files with 255 additions and 108 deletions
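A note on the `refactor: remove explicit Self import from typing` commit: `typing.Self` was only added to the standard library in Python 3.11 (PEP 673), so `from typing import Self` fails on the 3.10 interpreter that the new CI matrix covers. A minimal sketch of the portable pattern, for context only — the `Builder` class here is hypothetical and not from this PR:

```python
import sys

# PEP 673: Self lives in typing only from Python 3.11 onward;
# on 3.10 it must come from the typing_extensions backport.
if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self


class Builder:
    """Hypothetical fluent class illustrating the Self annotation."""

    def with_name(self, name: str) -> Self:
        self.name = name
        return self
```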

File 1 of 12

@@ -263,7 +263,6 @@ Let's create our flow in the `main.py` file:
 ```python
 #!/usr/bin/env python
 import json
-import os
 from typing import List, Dict
 from pydantic import BaseModel, Field
 from crewai import LLM
@@ -342,9 +341,6 @@ class GuideCreatorFlow(Flow[GuideCreatorState]):
         outline_dict = json.loads(response)
         self.state.guide_outline = GuideOutline(**outline_dict)
-        # Ensure output directory exists before saving
-        os.makedirs("output", exist_ok=True)
         # Save the outline to a file
         with open("output/guide_outline.json", "w") as f:
             json.dump(outline_dict, f, indent=2)
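With `os.makedirs("output", exist_ok=True)` gone from this snippet, the tutorial code now assumes the `output/` directory already exists when the outline is saved (presumably it is created elsewhere in the revised guide). If you lift the snippet out on its own, creating the directory first avoids a `FileNotFoundError` — a sketch, not part of this PR:

```python
from pathlib import Path

# Create the output directory (and any missing parents) before writing.
Path("output").mkdir(parents=True, exist_ok=True)
```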

File 2 of 12

@@ -25,7 +25,7 @@ uv add weaviate-client
 To effectively use the `WeaviateVectorSearchTool`, follow these steps:
 1. **Package Installation**: Confirm that the `crewai[tools]` and `weaviate-client` packages are installed in your Python environment.
-2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/manage-clusters/connect) for instructions.
+2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/connect) for instructions.
 3. **API Keys**: Obtain your Weaviate cluster URL and API key.
 4. **OpenAI API Key**: Ensure you have an OpenAI API key set in your environment variables as `OPENAI_API_KEY`.
@@ -161,4 +161,4 @@ rag_agent = Agent(
 ## Conclusion
 The `WeaviateVectorSearchTool` provides a powerful way to search for semantically similar documents in a Weaviate vector database. By leveraging vector embeddings, it enables more accurate and contextually relevant search results compared to traditional keyword-based searches. This tool is particularly useful for applications that require finding information based on meaning rather than exact matches.
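The four setup steps in this doc translate to a short instantiation, roughly as below. The constructor parameter names are my recollection of the `crewai_tools` API, not something this diff confirms; verify them against the installed version:

```python
import os

from crewai_tools import WeaviateVectorSearchTool  # needs crewai[tools] and weaviate-client

# Assumed parameter names; check the current WeaviateVectorSearchTool signature.
tool = WeaviateVectorSearchTool(
    collection_name="example_collection",  # Weaviate collection to query
    limit=3,                               # number of results to return
    weaviate_cluster_url=os.environ["WEAVIATE_CLUSTER_URL"],
    weaviate_api_key=os.environ["WEAVIATE_API_KEY"],
)
# OPENAI_API_KEY must also be set in the environment for the embedding calls.
```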

File 3 of 12

@@ -482,7 +482,6 @@ class Agent(BaseAgent):
             verbose=self.verbose,
             response_format=response_format,
             i18n=self.i18n,
-            original_agent=self,
         )
         return lite_agent.kickoff(messages)

File 4 of 12

@@ -47,6 +47,11 @@ from crewai.utilities.events.llm_events import (
     LLMCallStartedEvent,
     LLMCallType,
 )
+from crewai.utilities.events.tool_usage_events import (
+    ToolUsageErrorEvent,
+    ToolUsageFinishedEvent,
+    ToolUsageStartedEvent,
+)
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -150,10 +155,6 @@ class LiteAgent(BaseModel):
         default=[], description="Results of the tools used by the agent."
     )
-    # Reference of Agent
-    original_agent: Optional[BaseAgent] = Field(
-        default=None, description="Reference to the agent that created this LiteAgent"
-    )
     # Private Attributes
     _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -162,7 +163,7 @@ class LiteAgent(BaseModel):
     _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
     _iterations: int = PrivateAttr(default=0)
     _printer: Printer = PrivateAttr(default_factory=Printer)

     @model_validator(mode="after")
     def setup_llm(self):
         """Set up the LLM and other components after initialization."""
@@ -411,6 +412,18 @@
         formatted_answer = process_llm_response(answer, self.use_stop_words)

         if isinstance(formatted_answer, AgentAction):
+            # Emit tool usage started event
+            crewai_event_bus.emit(
+                self,
+                event=ToolUsageStartedEvent(
+                    agent_key=self.key,
+                    agent_role=self.role,
+                    tool_name=formatted_answer.tool,
+                    tool_args=formatted_answer.tool_input,
+                    tool_class=formatted_answer.tool,
+                ),
+            )
             try:
                 tool_result = execute_tool_and_check_finality(
                     agent_action=formatted_answer,
@@ -418,9 +431,34 @@
                     i18n=self.i18n,
                     agent_key=self.key,
                     agent_role=self.role,
-                    agent=self.original_agent,
                 )
+                # Emit tool usage finished event
+                crewai_event_bus.emit(
+                    self,
+                    event=ToolUsageFinishedEvent(
+                        agent_key=self.key,
+                        agent_role=self.role,
+                        tool_name=formatted_answer.tool,
+                        tool_args=formatted_answer.tool_input,
+                        tool_class=formatted_answer.tool,
+                        started_at=datetime.now(),
+                        finished_at=datetime.now(),
+                        output=tool_result.result,
+                    ),
+                )
             except Exception as e:
+                # Emit tool usage error event
+                crewai_event_bus.emit(
+                    self,
+                    event=ToolUsageErrorEvent(
+                        agent_key=self.key,
+                        agent_role=self.role,
+                        tool_name=formatted_answer.tool,
+                        tool_args=formatted_answer.tool_input,
+                        tool_class=formatted_answer.tool,
+                        error=str(e),
+                    ),
+                )
                 raise e

             formatted_answer = handle_agent_action_core(
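With `LiteAgent` now emitting these events itself, callers can observe tool usage by registering handlers on `crewai_event_bus`, the same mechanism the new `test_tool_execution_error_event` test below uses for `ToolExecutionErrorEvent`. A minimal sketch (the handler bodies are illustrative):

```python
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
    ToolUsageErrorEvent,
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)


@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_started(source, event):
    print(f"{event.agent_role} started {event.tool_name} with {event.tool_args}")


@crewai_event_bus.on(ToolUsageFinishedEvent)
def on_tool_finished(source, event):
    print(f"{event.tool_name} finished with output: {event.output}")


@crewai_event_bus.on(ToolUsageErrorEvent)
def on_tool_error(source, event):
    print(f"{event.tool_name} failed: {event.error}")
```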

File 5 of 12

@@ -707,6 +707,15 @@ class LLM(BaseLLM):
                     function_name, lambda: None
                 )  # Ensure fn is always a callable
                 logging.error(f"Error executing function '{function_name}': {e}")
+                crewai_event_bus.emit(
+                    self,
+                    event=ToolExecutionErrorEvent(
+                        tool_name=function_name,
+                        tool_args=function_args,
+                        tool_class=fn,
+                        error=str(e),
+                    ),
+                )
                 crewai_event_bus.emit(
                     self,
                     event=LLMCallFailedEvent(error=f"Tool execution error: {str(e)}"),

File 6 of 12

@@ -1,4 +1,3 @@
-import asyncio
 import warnings
 from abc import ABC, abstractmethod
 from inspect import signature
@@ -66,13 +65,7 @@ class BaseTool(BaseModel, ABC):
         **kwargs: Any,
     ) -> Any:
         print(f"Using Tool: {self.name}")
-        result = self._run(*args, **kwargs)
-        # If _run is async, we safely run it
-        if asyncio.iscoroutine(result):
-            return asyncio.run(result)
-        return result
+        return self._run(*args, **kwargs)

     @abstractmethod
     def _run(
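Since `BaseTool.run` no longer detects coroutines, a tool whose `_run` is `async def` now hands a coroutine object back to the caller instead of an awaited result (the tests covering the old behavior are deleted at the end of this compare). Callers outside an event loop have to drive it themselves — a sketch, assuming a hypothetical `async_tool` like the `AsyncTool` in the removed tests:

```python
import asyncio

# BaseTool.run now returns whatever _run returns, so for an async _run
# the caller receives a coroutine and must run it explicitly.
coro = async_tool.run(input_text="hello")  # async_tool is hypothetical
result = asyncio.run(coro)
```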

File 7 of 12

@@ -2,6 +2,7 @@ import ast
 import datetime
 import json
 import time
+from dataclasses import dataclass
 from difflib import SequenceMatcher
 from json import JSONDecodeError
 from textwrap import dedent
@@ -25,7 +26,6 @@ from crewai.utilities.events.tool_usage_events import (
     ToolSelectionErrorEvent,
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
-    ToolUsageStartedEvent,
     ToolValidateInputErrorEvent,
 )
@@ -166,21 +166,6 @@ class ToolUsage:
         if self.task:
             self.task.increment_tools_errors()
-        if self.agent:
-            event_data = {
-                "agent_key": self.agent.key,
-                "agent_role": self.agent.role,
-                "tool_name": self.action.tool,
-                "tool_args": self.action.tool_input,
-                "tool_class": self.action.tool,
-                "agent": self.agent,
-            }
-            if self.agent.fingerprint:
-                event_data.update(self.agent.fingerprint)
-            crewai_event_bus.emit(self,ToolUsageStartedEvent(**event_data))
         started_at = time.time()
         from_cache = False
         result = None  # type: ignore

File 8 of 12

@@ -16,6 +16,7 @@ from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.utilities import I18N, Printer
+from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )

File 9 of 12

@@ -5,6 +5,11 @@ from crewai.security import Fingerprint
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
+from crewai.utilities.events import crewai_event_bus
+from crewai.utilities.events.tool_usage_events import (
+    ToolUsageErrorEvent,
+    ToolUsageStartedEvent,
+)
 from crewai.utilities.i18n import I18N
@@ -37,8 +42,10 @@ def execute_tool_and_check_finality(
         ToolResult containing the execution result and whether it should be treated as a final answer
     """
     try:
+        # Create tool name to tool map
         tool_name_to_tool_map = {tool.name: tool for tool in tools}
+        # Emit tool usage event if agent info is available
         if agent_key and agent_role and agent:
             fingerprint_context = fingerprint_context or {}
             if agent:
@@ -52,6 +59,22 @@
             except Exception as e:
                 raise ValueError(f"Failed to set fingerprint: {e}")
+
+            event_data = {
+                "agent_key": agent_key,
+                "agent_role": agent_role,
+                "tool_name": agent_action.tool,
+                "tool_args": agent_action.tool_input,
+                "tool_class": agent_action.tool,
+                "agent": agent,
+            }
+            event_data.update(fingerprint_context)
+            crewai_event_bus.emit(
+                agent,
+                event=ToolUsageStartedEvent(
+                    **event_data,
+                ),
+            )
         # Create tool usage instance
         tool_usage = ToolUsage(
             tools_handler=tools_handler,
@@ -87,4 +110,17 @@
         return ToolResult(tool_result, False)
     except Exception as e:
+        # Emit error event if agent info is available
+        if agent_key and agent_role and agent:
+            crewai_event_bus.emit(
+                agent,
+                event=ToolUsageErrorEvent(
+                    agent_key=agent_key,
+                    agent_role=agent_role,
+                    tool_name=agent_action.tool,
+                    tool_args=agent_action.tool_input,
+                    tool_class=agent_action.tool,
+                    error=str(e),
+                ),
+            )
         raise e
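Read together with the `ToolUsage` change above, this relocates the `ToolUsageStartedEvent` emission from `ToolUsage` into `execute_tool_and_check_finality`: crew agents now get their started/error events from this helper, while `LiteAgent` (which no longer passes an `agent` reference) emits its own events directly, as shown earlier in this compare.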

File 10 of 12 (new file)

@@ -0,0 +1,112 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "Use the failing tool"}], "model":
      "gpt-4o-mini", "stop": [], "tools": [{"type": "function", "function": {"name":
      "failing_tool", "description": "This tool always fails.", "parameters": {"type":
      "object", "properties": {"param": {"type": "string", "description": "A test
      parameter"}}, "required": ["param"]}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '353'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-B2P4zoJZuES7Aom8ugEq1modz5Vsl\",\n \"object\":
      \"chat.completion\",\n \"created\": 1739912761,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
      \ \"id\": \"call_F6fJxISpMKUBIGV6dd2vjRNG\",\n \"type\":
      \"function\",\n \"function\": {\n \"name\": \"failing_tool\",\n
      \ \"arguments\": \"{\\\"param\\\":\\\"test\\\"}\"\n }\n
      \ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n
      \ \"prompt_tokens\": 51,\n \"completion_tokens\": 15,\n \"total_tokens\":
      66,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
      0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
      0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\":
      0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":
      \"fp_00428b782a\"\n}\n"
    headers:
      CF-RAY:
      - 9140fa827f38eb1e-SJC
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Tue, 18 Feb 2025 21:06:02 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=xbuu3IQpCMh.43ZrqL1TRMECOc6QldgHV0hzOX1GrWI-1739912762-1.0.1.1-t7iyq5xMioPrwfeaHLvPT9rwRPp7Q9A9uIm69icH9dPxRD4xMA3cWqb1aXj1_e2IyAEQQWFe1UWjlmJ22aHh3Q;
        path=/; expires=Tue, 18-Feb-25 21:36:02 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=x9l.Rhja8_wXDN.j8qcEU1PvvEqAwZp4Fd3s_aj4qwM-1739912762161-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '861'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999978'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_8666ec3aa6677cb346ba00993556051d
    http_version: HTTP/1.1
    status_code: 200
version: 1
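This cassette backs the new `test_tool_execution_error_event` test below: its `@pytest.mark.vcr(filter_headers=["authorization"])` marker makes VCR replay the recorded OpenAI response instead of calling the API, so the model deterministically requests `failing_tool` and the error path fires.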

File 11 of 12

@@ -395,3 +395,51 @@ def test_deepseek_r1_with_open_router():
     result = llm.call("What is the capital of France?")
     assert isinstance(result, str)
     assert "Paris" in result
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_tool_execution_error_event():
+    llm = LLM(model="gpt-4o-mini")
+
+    def failing_tool(param: str) -> str:
+        """This tool always fails."""
+        raise Exception("Tool execution failed!")
+
+    tool_schema = {
+        "type": "function",
+        "function": {
+            "name": "failing_tool",
+            "description": "This tool always fails.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "param": {"type": "string", "description": "A test parameter"}
+                },
+                "required": ["param"],
+            },
+        },
+    }
+
+    received_events = []
+
+    @crewai_event_bus.on(ToolExecutionErrorEvent)
+    def event_handler(source, event):
+        received_events.append(event)
+
+    available_functions = {"failing_tool": failing_tool}
+    messages = [{"role": "user", "content": "Use the failing tool"}]
+    llm.call(
+        messages,
+        tools=[tool_schema],
+        available_functions=available_functions,
+    )
+
+    assert len(received_events) == 1
+    event = received_events[0]
+    assert isinstance(event, ToolExecutionErrorEvent)
+    assert event.tool_name == "failing_tool"
+    assert event.tool_args == {"param": "test"}
+    assert event.tool_class == failing_tool
+    assert "Tool execution failed!" in event.error

File 12 of 12

@@ -1,8 +1,4 @@
-import asyncio
-import inspect
-import unittest
-from typing import Any, Callable, Dict, List
-from unittest.mock import patch
+from typing import Callable

 from crewai.tools import BaseTool, tool
@@ -126,69 +122,3 @@ def test_result_as_answer_in_tool_decorator():
     converted_tool = my_tool_with_default.to_structured_tool()
     assert converted_tool.result_as_answer is False
-
-
-class SyncTool(BaseTool):
-    """Test implementation with a synchronous _run method"""
-
-    name: str = "sync_tool"
-    description: str = "A synchronous tool for testing"
-
-    def _run(self, input_text: str) -> str:
-        """Process input text synchronously."""
-        return f"Processed {input_text} synchronously"
-
-
-class AsyncTool(BaseTool):
-    """Test implementation with an asynchronous _run method"""
-
-    name: str = "async_tool"
-    description: str = "An asynchronous tool for testing"
-
-    async def _run(self, input_text: str) -> str:
-        """Process input text asynchronously."""
-        await asyncio.sleep(0.1)  # Simulate async operation
-        return f"Processed {input_text} asynchronously"
-
-
-def test_sync_run_returns_direct_result():
-    """Test that _run in a synchronous tool returns a direct result, not a coroutine."""
-    tool = SyncTool()
-
-    result = tool._run(input_text="hello")
-    assert not asyncio.iscoroutine(result)
-    assert result == "Processed hello synchronously"
-
-    run_result = tool.run(input_text="hello")
-    assert run_result == "Processed hello synchronously"
-
-
-def test_async_run_returns_coroutine():
-    """Test that _run in an asynchronous tool returns a coroutine object."""
-    tool = AsyncTool()
-
-    result = tool._run(input_text="hello")
-    assert asyncio.iscoroutine(result)
-    result.close()  # Clean up the coroutine
-
-
-def test_run_calls_asyncio_run_for_async_tools():
-    """Test that asyncio.run is called when using async tools."""
-    async_tool = AsyncTool()
-
-    with patch('asyncio.run') as mock_run:
-        mock_run.return_value = "Processed test asynchronously"
-        async_result = async_tool.run(input_text="test")
-        mock_run.assert_called_once()
-        assert async_result == "Processed test asynchronously"
-
-
-def test_run_does_not_call_asyncio_run_for_sync_tools():
-    """Test that asyncio.run is NOT called when using sync tools."""
-    sync_tool = SyncTool()
-
-    with patch('asyncio.run') as mock_run:
-        sync_result = sync_tool.run(input_text="test")
-        mock_run.assert_not_called()
-        assert sync_result == "Processed test synchronously"