mirror of https://github.com/crewAIInc/crewAI.git
synced 2025-12-29 02:38:29 +00:00

Compare commits: bugfix-pyt...devin/1744 (13 commits)

| SHA1 |
|---|
| 0ca93ba0fa |
| 9267fc0555 |
| 67032d56d9 |
| 8702bf1e34 |
| 6a1eb10830 |
| 10edde100e |
| 1b9cdda134 |
| e8c9c734a8 |
| 40a441f30e |
| ea5ae9086a |
| 0cd524af86 |
| 4bff5408d8 |
| d2caf11191 |
.github/workflows/tests.yml (vendored): 8 changed lines
```diff
@@ -12,6 +12,9 @@ jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
+    strategy:
+      matrix:
+        python-version: ['3.10', '3.11', '3.12']
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -21,9 +24,8 @@ jobs:
         with:
           enable-cache: true
 
-      - name: Set up Python
-        run: uv python install 3.12.8
+      - name: Set up Python ${{ matrix.python-version }}
+        run: uv python install ${{ matrix.python-version }}
 
       - name: Install the project
         run: uv sync --dev --all-extras
```
```diff
@@ -263,6 +263,7 @@ Let's create our flow in the `main.py` file:
 ```python
 #!/usr/bin/env python
 import json
+import os
 from typing import List, Dict
 from pydantic import BaseModel, Field
 from crewai import LLM
@@ -341,6 +342,9 @@ class GuideCreatorFlow(Flow[GuideCreatorState]):
         outline_dict = json.loads(response)
         self.state.guide_outline = GuideOutline(**outline_dict)
 
+        # Ensure output directory exists before saving
+        os.makedirs("output", exist_ok=True)
+
         # Save the outline to a file
         with open("output/guide_outline.json", "w") as f:
             json.dump(outline_dict, f, indent=2)
```
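The `os.makedirs` call added above guards against a `FileNotFoundError` when the `output` directory does not yet exist. A minimal sketch of the same pattern, with a placeholder payload:

```python
import json
import os

# exist_ok=True makes the call idempotent: no error if "output" already exists
os.makedirs("output", exist_ok=True)

with open("output/guide_outline.json", "w") as f:
    json.dump({"title": "Example Guide"}, f, indent=2)  # placeholder payload
```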
```diff
@@ -25,7 +25,7 @@ uv add weaviate-client
 To effectively use the `WeaviateVectorSearchTool`, follow these steps:
 
 1. **Package Installation**: Confirm that the `crewai[tools]` and `weaviate-client` packages are installed in your Python environment.
-2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/connect) for instructions.
+2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/manage-clusters/connect) for instructions.
 3. **API Keys**: Obtain your Weaviate cluster URL and API key.
 4. **OpenAI API Key**: Ensure you have an OpenAI API key set in your environment variables as `OPENAI_API_KEY`.
@@ -161,4 +161,4 @@ rag_agent = Agent(
 
 ## Conclusion
 
-The `WeaviateVectorSearchTool` provides a powerful way to search for semantically similar documents in a Weaviate vector database. By leveraging vector embeddings, it enables more accurate and contextually relevant search results compared to traditional keyword-based searches. This tool is particularly useful for applications that require finding information based on meaning rather than exact matches.
+The `WeaviateVectorSearchTool` provides a powerful way to search for semantically similar documents in a Weaviate vector database. By leveraging vector embeddings, it enables more accurate and contextually relevant search results compared to traditional keyword-based searches. This tool is particularly useful for applications that require finding information based on meaning rather than exact matches.
```
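For orientation, a minimal usage sketch of the tool described above. The constructor and `run` parameters shown here are assumptions inferred from the setup steps, not confirmed by this excerpt; check the tool's documentation for the exact signature:

```python
import os

from crewai_tools import WeaviateVectorSearchTool

# collection_name, weaviate_cluster_url, and weaviate_api_key are assumed
# parameter names; the environment variable names are also illustrative.
tool = WeaviateVectorSearchTool(
    collection_name="example_collection",
    weaviate_cluster_url=os.environ["WEAVIATE_CLUSTER_URL"],
    weaviate_api_key=os.environ["WEAVIATE_API_KEY"],
)

results = tool.run(query="documents about vector search")  # assumed entry point
print(results)
```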
```diff
@@ -482,6 +482,7 @@ class Agent(BaseAgent):
             verbose=self.verbose,
             response_format=response_format,
             i18n=self.i18n,
+            original_agent=self,
         )
 
         return lite_agent.kickoff(messages)
```
```diff
@@ -33,7 +33,8 @@ def train():
     Train the crew for a given number of iterations.
     """
     inputs = {
-        "topic": "AI LLMs"
+        "topic": "AI LLMs",
+        'current_year': str(datetime.now().year)
     }
     try:
         {{crew_name}}().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
@@ -59,6 +60,7 @@ def test():
         "topic": "AI LLMs",
+        "current_year": str(datetime.now().year)
     }
 
     try:
         {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), eval_llm=sys.argv[2], inputs=inputs)
```
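Both templates now call `datetime.now()`, so the generated `main.py` must import it. A minimal sketch of the resulting inputs dict, assuming the template's `from datetime import datetime` import:

```python
from datetime import datetime

inputs = {
    "topic": "AI LLMs",
    "current_year": str(datetime.now().year),  # e.g. "2025"
}
```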
```diff
@@ -1214,6 +1214,17 @@ class Crew(BaseModel):
 
         copied_data = self.model_dump(exclude=exclude)
         copied_data = {k: v for k, v in copied_data.items() if v is not None}
+
+        if self.short_term_memory:
+            copied_data["short_term_memory"] = self.short_term_memory.model_copy(deep=True)
+        if self.long_term_memory:
+            copied_data["long_term_memory"] = self.long_term_memory.model_copy(deep=True)
+        if self.entity_memory:
+            copied_data["entity_memory"] = self.entity_memory.model_copy(deep=True)
+        if self.external_memory:
+            copied_data["external_memory"] = self.external_memory.model_copy(deep=True)
+        if self.user_memory:
+            copied_data["user_memory"] = self.user_memory.model_copy(deep=True)
 
         copied_data.pop("agents", None)
         copied_data.pop("tasks", None)
```
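The fix relies on Pydantic v2's `model_copy(deep=True)`, which clones the whole object graph so the copied crew's memory stores are independent of the originals. A self-contained sketch with a stand-in model (not crewAI's actual memory class):

```python
from typing import List

from pydantic import BaseModel, Field

class MemoryStub(BaseModel):
    # Stand-in for crewAI's memory classes
    entries: List[str] = Field(default_factory=list)

original = MemoryStub(entries=["first"])
clone = original.model_copy(deep=True)  # independent object graph

clone.entries.append("second")
assert original.entries == ["first"]  # mutating the clone leaves the original intact
```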
```diff
@@ -47,11 +47,6 @@ from crewai.utilities.events.llm_events import (
     LLMCallStartedEvent,
     LLMCallType,
 )
-from crewai.utilities.events.tool_usage_events import (
-    ToolUsageErrorEvent,
-    ToolUsageFinishedEvent,
-    ToolUsageStartedEvent,
-)
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -155,6 +150,10 @@ class LiteAgent(BaseModel):
         default=[], description="Results of the tools used by the agent."
     )
 
+    # Reference of Agent
+    original_agent: Optional[BaseAgent] = Field(
+        default=None, description="Reference to the agent that created this LiteAgent"
+    )
     # Private Attributes
     _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -163,7 +162,7 @@ class LiteAgent(BaseModel):
     _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
     _iterations: int = PrivateAttr(default=0)
     _printer: Printer = PrivateAttr(default_factory=Printer)
 
     @model_validator(mode="after")
     def setup_llm(self):
         """Set up the LLM and other components after initialization."""
@@ -412,18 +411,6 @@ class LiteAgent(BaseModel):
         formatted_answer = process_llm_response(answer, self.use_stop_words)
 
         if isinstance(formatted_answer, AgentAction):
-            # Emit tool usage started event
-            crewai_event_bus.emit(
-                self,
-                event=ToolUsageStartedEvent(
-                    agent_key=self.key,
-                    agent_role=self.role,
-                    tool_name=formatted_answer.tool,
-                    tool_args=formatted_answer.tool_input,
-                    tool_class=formatted_answer.tool,
-                ),
-            )
-
             try:
                 tool_result = execute_tool_and_check_finality(
                     agent_action=formatted_answer,
@@ -431,34 +418,9 @@ class LiteAgent(BaseModel):
                     i18n=self.i18n,
                     agent_key=self.key,
                     agent_role=self.role,
                 )
-                # Emit tool usage finished event
-                crewai_event_bus.emit(
-                    self,
-                    event=ToolUsageFinishedEvent(
-                        agent_key=self.key,
-                        agent_role=self.role,
-                        tool_name=formatted_answer.tool,
-                        tool_args=formatted_answer.tool_input,
-                        tool_class=formatted_answer.tool,
-                        started_at=datetime.now(),
-                        finished_at=datetime.now(),
-                        output=tool_result.result,
-                    ),
-                    agent=self.original_agent,
-                )
             except Exception as e:
-                # Emit tool usage error event
-                crewai_event_bus.emit(
-                    self,
-                    event=ToolUsageErrorEvent(
-                        agent_key=self.key,
-                        agent_role=self.role,
-                        tool_name=formatted_answer.tool,
-                        tool_args=formatted_answer.tool_input,
-                        tool_class=formatted_answer.tool,
-                        error=str(e),
-                    ),
-                )
                 raise e
 
             formatted_answer = handle_agent_action_core(
```
```diff
@@ -707,15 +707,6 @@ class LLM(BaseLLM):
                         function_name, lambda: None
                     )  # Ensure fn is always a callable
                     logging.error(f"Error executing function '{function_name}': {e}")
-                    crewai_event_bus.emit(
-                        self,
-                        event=ToolExecutionErrorEvent(
-                            tool_name=function_name,
-                            tool_args=function_args,
-                            tool_class=fn,
-                            error=str(e),
-                        ),
-                    )
                     crewai_event_bus.emit(
                         self,
                         event=LLMCallFailedEvent(error=f"Tool execution error: {str(e)}"),
```
```diff
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional, Self
+from typing import TYPE_CHECKING, Any, Dict, Optional
 
 from crewai.memory.external.external_memory_item import ExternalMemoryItem
 from crewai.memory.memory import Memory
@@ -52,7 +52,7 @@ class ExternalMemory(Memory):
     def reset(self) -> None:
         self.storage.reset()
 
-    def set_crew(self, crew: Any) -> Self:
+    def set_crew(self, crew: Any) -> "ExternalMemory":
         super().set_crew(crew)
 
         if not self.storage:
```
```diff
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Self
+from typing import Any, Dict, List, Optional
 
 from pydantic import BaseModel
@@ -38,6 +38,6 @@ class Memory(BaseModel):
             query=query, limit=limit, score_threshold=score_threshold
         )
 
-    def set_crew(self, crew: Any) -> Self:
+    def set_crew(self, crew: Any) -> "Memory":
         self.crew = crew
         return self
```
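`typing.Self` (PEP 673) only exists from Python 3.11 onward, so importing it fails on the Python 3.10 interpreter that the new CI matrix adds. A string forward reference is the 3.10-compatible spelling of the same return type:

```python
from typing import Any

class Memory:
    # `from typing import Self` raises ImportError on Python 3.10;
    # a quoted forward reference works on every supported version.
    def set_crew(self, crew: Any) -> "Memory":
        self.crew = crew
        return self
```

`typing_extensions.Self` would be another option, at the cost of a runtime dependency.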
```diff
@@ -48,7 +48,7 @@ class Mem0Storage(Storage):
             self.memory = MemoryClient(api_key=mem0_api_key)
         else:
             if mem0_local_config and len(mem0_local_config):
-                self.memory = Memory.from_config(config)
+                self.memory = Memory.from_config(mem0_local_config)
             else:
                 self.memory = Memory()
```
```diff
@@ -1,3 +1,4 @@
+import asyncio
 import warnings
 from abc import ABC, abstractmethod
 from inspect import signature
@@ -65,7 +66,13 @@ class BaseTool(BaseModel, ABC):
         **kwargs: Any,
     ) -> Any:
         print(f"Using Tool: {self.name}")
-        return self._run(*args, **kwargs)
+        result = self._run(*args, **kwargs)
+
+        # If _run is async, we safely run it
+        if asyncio.iscoroutine(result):
+            return asyncio.run(result)
+
+        return result
 
     @abstractmethod
     def _run(
```
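This change makes `BaseTool.run` transparent to both sync and async `_run` implementations: calling an `async def` returns a coroutine rather than a value, which `asyncio.iscoroutine` detects and `asyncio.run` drives to completion. A standalone sketch of the dispatch pattern:

```python
import asyncio

def run_maybe_async(fn, *args, **kwargs):
    result = fn(*args, **kwargs)
    # An `async def` returns a coroutine instead of a value...
    if asyncio.iscoroutine(result):
        # ...so drive it to completion on a fresh event loop.
        return asyncio.run(result)
    return result

async def greet(name: str) -> str:
    await asyncio.sleep(0)  # stand-in for real async work
    return f"hello, {name}"

def shout(name: str) -> str:
    return name.upper()

assert run_maybe_async(greet, "world") == "hello, world"
assert run_maybe_async(shout, "world") == "WORLD"
```

One caveat of this design: `asyncio.run` raises `RuntimeError` if an event loop is already running in the calling thread, so the pattern suits synchronous call sites only.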
```diff
@@ -2,7 +2,6 @@ import ast
 import datetime
 import json
 import time
 from dataclasses import dataclass
 from difflib import SequenceMatcher
 from json import JSONDecodeError
 from textwrap import dedent
@@ -26,6 +25,7 @@ from crewai.utilities.events.tool_usage_events import (
     ToolSelectionErrorEvent,
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
+    ToolUsageStartedEvent,
     ToolValidateInputErrorEvent,
 )
```
```diff
@@ -166,6 +166,21 @@ class ToolUsage:
             if self.task:
                 self.task.increment_tools_errors()
 
+        if self.agent:
+            event_data = {
+                "agent_key": self.agent.key,
+                "agent_role": self.agent.role,
+                "tool_name": self.action.tool,
+                "tool_args": self.action.tool_input,
+                "tool_class": self.action.tool,
+                "agent": self.agent,
+            }
+
+            if self.agent.fingerprint:
+                event_data.update(self.agent.fingerprint)
+
+            crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
+
         started_at = time.time()
         from_cache = False
         result = None  # type: ignore
```
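With emission consolidated in `ToolUsage`, consumers observe tool activity by subscribing on the event bus. The decorator pattern below mirrors the one used in this repository's tests:

```python
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent

received_events = []

@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_started(source, event):
    # event carries agent_key, agent_role, tool_name, tool_args, tool_class
    received_events.append(event)
```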
```diff
@@ -16,7 +16,6 @@ from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.utilities import I18N, Printer
-from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )
```
```diff
@@ -5,11 +5,6 @@ from crewai.security import Fingerprint
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.tool_usage_events import (
-    ToolUsageErrorEvent,
-    ToolUsageStartedEvent,
-)
 from crewai.utilities.i18n import I18N
```
```diff
@@ -42,10 +37,8 @@ def execute_tool_and_check_finality(
         ToolResult containing the execution result and whether it should be treated as a final answer
     """
     try:
         # Create tool name to tool map
         tool_name_to_tool_map = {tool.name: tool for tool in tools}
 
-        # Emit tool usage event if agent info is available
-        if agent_key and agent_role and agent:
-            fingerprint_context = fingerprint_context or {}
+        fingerprint_context = fingerprint_context or {}
         if agent:
@@ -59,22 +52,6 @@ def execute_tool_and_check_finality(
         except Exception as e:
             raise ValueError(f"Failed to set fingerprint: {e}")
 
-        event_data = {
-            "agent_key": agent_key,
-            "agent_role": agent_role,
-            "tool_name": agent_action.tool,
-            "tool_args": agent_action.tool_input,
-            "tool_class": agent_action.tool,
-            "agent": agent,
-        }
-        event_data.update(fingerprint_context)
-        crewai_event_bus.emit(
-            agent,
-            event=ToolUsageStartedEvent(
-                **event_data,
-            ),
-        )
 
     # Create tool usage instance
     tool_usage = ToolUsage(
         tools_handler=tools_handler,
@@ -110,17 +87,4 @@ def execute_tool_and_check_finality(
         return ToolResult(tool_result, False)
 
     except Exception as e:
-        # Emit error event if agent info is available
-        if agent_key and agent_role and agent:
-            crewai_event_bus.emit(
-                agent,
-                event=ToolUsageErrorEvent(
-                    agent_key=agent_key,
-                    agent_role=agent_role,
-                    tool_name=agent_action.tool,
-                    tool_args=agent_action.tool_input,
-                    tool_class=agent_action.tool,
-                    error=str(e),
-                ),
-            )
         raise e
```
```diff
@@ -1,112 +0,0 @@
-interactions:
-- request:
-    body: '{"messages": [{"role": "user", "content": "Use the failing tool"}], "model":
-      "gpt-4o-mini", "stop": [], "tools": [{"type": "function", "function": {"name":
-      "failing_tool", "description": "This tool always fails.", "parameters": {"type":
-      "object", "properties": {"param": {"type": "string", "description": "A test
-      parameter"}}, "required": ["param"]}}}]}'
-    headers:
-      accept:
-      - application/json
-      accept-encoding:
-      - gzip, deflate
-      connection:
-      - keep-alive
-      content-length:
-      - '353'
-      content-type:
-      - application/json
-      host:
-      - api.openai.com
-      user-agent:
-      - OpenAI/Python 1.61.0
-      x-stainless-arch:
-      - arm64
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - MacOS
-      x-stainless-package-version:
-      - 1.61.0
-      x-stainless-raw-response:
-      - 'true'
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.12.8
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    content: "{\n \"id\": \"chatcmpl-B2P4zoJZuES7Aom8ugEq1modz5Vsl\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1739912761,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
-      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
-      \ \"id\": \"call_F6fJxISpMKUBIGV6dd2vjRNG\",\n \"type\":
-      \"function\",\n \"function\": {\n \"name\": \"failing_tool\",\n
-      \ \"arguments\": \"{\\\"param\\\":\\\"test\\\"}\"\n }\n
-      \ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
-      null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n
-      \ \"prompt_tokens\": 51,\n \"completion_tokens\": 15,\n \"total_tokens\":
-      66,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
-      0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
-      0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\":
-      0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":
-      \"fp_00428b782a\"\n}\n"
-    headers:
-      CF-RAY:
-      - 9140fa827f38eb1e-SJC
-      Connection:
-      - keep-alive
-      Content-Encoding:
-      - gzip
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 18 Feb 2025 21:06:02 GMT
-      Server:
-      - cloudflare
-      Set-Cookie:
-      - __cf_bm=xbuu3IQpCMh.43ZrqL1TRMECOc6QldgHV0hzOX1GrWI-1739912762-1.0.1.1-t7iyq5xMioPrwfeaHLvPT9rwRPp7Q9A9uIm69icH9dPxRD4xMA3cWqb1aXj1_e2IyAEQQWFe1UWjlmJ22aHh3Q;
-        path=/; expires=Tue, 18-Feb-25 21:36:02 GMT; domain=.api.openai.com; HttpOnly;
-        Secure; SameSite=None
-      - _cfuvid=x9l.Rhja8_wXDN.j8qcEU1PvvEqAwZp4Fd3s_aj4qwM-1739912762161-0.0.1.1-604800000;
-        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - nosniff
-      access-control-expose-headers:
-      - X-Request-ID
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - crewai-iuxna1
-      openai-processing-ms:
-      - '861'
-      openai-version:
-      - '2020-10-01'
-      strict-transport-security:
-      - max-age=31536000; includeSubDomains; preload
-      x-ratelimit-limit-requests:
-      - '30000'
-      x-ratelimit-limit-tokens:
-      - '150000000'
-      x-ratelimit-remaining-requests:
-      - '29999'
-      x-ratelimit-remaining-tokens:
-      - '149999978'
-      x-ratelimit-reset-requests:
-      - 2ms
-      x-ratelimit-reset-tokens:
-      - 0s
-      x-request-id:
-      - req_8666ec3aa6677cb346ba00993556051d
-    http_version: HTTP/1.1
-    status_code: 200
-version: 1
```
```diff
@@ -3,6 +3,7 @@
 import hashlib
 import json
 import os
 import tempfile
 from concurrent.futures import Future
 from unittest import mock
 from unittest.mock import MagicMock, patch
@@ -19,6 +20,7 @@ from crewai.crews.crew_output import CrewOutput
 from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
 from crewai.llm import LLM
 from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.memory.short_term.short_term_memory import ShortTermMemory
 from crewai.process import Process
 from crewai.task import Task
 from crewai.tasks.conditional_task import ConditionalTask
@@ -4116,6 +4118,54 @@ def test_crew_kickoff_for_each_works_with_manager_agent_copy():
     assert crew_copy.manager_agent.id != crew.manager_agent.id
     assert crew_copy.manager_agent.role == crew.manager_agent.role
     assert crew_copy.manager_agent.goal == crew.manager_agent.goal
     assert crew_copy.manager_agent.backstory == crew.manager_agent.backstory
     assert isinstance(crew_copy.manager_agent.agent_executor, CrewAgentExecutor)
     assert isinstance(crew_copy.manager_agent.cache_handler, CacheHandler)
+
+
+def test_crew_copy_with_memory():
+    """Test that copying a crew with memory enabled does not raise validation errors and copies memory correctly."""
+    agent = Agent(role="Test Agent", goal="Test Goal", backstory="Test Backstory")
+    task = Task(description="Test Task", expected_output="Test Output", agent=agent)
+    crew = Crew(agents=[agent], tasks=[task], memory=True)
+
+    original_short_term_id = id(crew._short_term_memory) if crew._short_term_memory else None
+    original_long_term_id = id(crew._long_term_memory) if crew._long_term_memory else None
+    original_entity_id = id(crew._entity_memory) if crew._entity_memory else None
+    original_external_id = id(crew._external_memory) if crew._external_memory else None
+    original_user_id = id(crew._user_memory) if crew._user_memory else None
+
+    try:
+        crew_copy = crew.copy()
+
+        assert hasattr(crew_copy, "_short_term_memory"), "Copied crew should have _short_term_memory"
+        assert crew_copy._short_term_memory is not None, "Copied _short_term_memory should not be None"
+        assert id(crew_copy._short_term_memory) != original_short_term_id, "Copied _short_term_memory should be a new object"
+
+        assert hasattr(crew_copy, "_long_term_memory"), "Copied crew should have _long_term_memory"
+        assert crew_copy._long_term_memory is not None, "Copied _long_term_memory should not be None"
+        assert id(crew_copy._long_term_memory) != original_long_term_id, "Copied _long_term_memory should be a new object"
+
+        assert hasattr(crew_copy, "_entity_memory"), "Copied crew should have _entity_memory"
+        assert crew_copy._entity_memory is not None, "Copied _entity_memory should not be None"
+        assert id(crew_copy._entity_memory) != original_entity_id, "Copied _entity_memory should be a new object"
+
+        if original_external_id:
+            assert hasattr(crew_copy, "_external_memory"), "Copied crew should have _external_memory"
+            assert crew_copy._external_memory is not None, "Copied _external_memory should not be None"
+            assert id(crew_copy._external_memory) != original_external_id, "Copied _external_memory should be a new object"
+        else:
+            assert not hasattr(crew_copy, "_external_memory") or crew_copy._external_memory is None, "Copied _external_memory should be None if not originally present"
+
+        if original_user_id:
+            assert hasattr(crew_copy, "_user_memory"), "Copied crew should have _user_memory"
+            assert crew_copy._user_memory is not None, "Copied _user_memory should not be None"
+            assert id(crew_copy._user_memory) != original_user_id, "Copied _user_memory should be a new object"
+        else:
+            assert not hasattr(crew_copy, "_user_memory") or crew_copy._user_memory is None, "Copied _user_memory should be None if not originally present"
+
+    except pydantic_core.ValidationError as e:
+        if "Input should be an instance of" in str(e) and ("Memory" in str(e)):
+            pytest.fail(f"Copying with memory raised Pydantic ValidationError, likely due to incorrect memory copy: {e}")
+        else:
+            raise e  # Re-raise other validation errors
+    except Exception as e:
+        pytest.fail(f"Copying crew raised an unexpected exception: {e}")
```
```diff
@@ -395,51 +395,3 @@ def test_deepseek_r1_with_open_router():
     result = llm.call("What is the capital of France?")
     assert isinstance(result, str)
     assert "Paris" in result
-
-
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_tool_execution_error_event():
-    llm = LLM(model="gpt-4o-mini")
-
-    def failing_tool(param: str) -> str:
-        """This tool always fails."""
-        raise Exception("Tool execution failed!")
-
-    tool_schema = {
-        "type": "function",
-        "function": {
-            "name": "failing_tool",
-            "description": "This tool always fails.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "param": {"type": "string", "description": "A test parameter"}
-                },
-                "required": ["param"],
-            },
-        },
-    }
-
-    received_events = []
-
-    @crewai_event_bus.on(ToolExecutionErrorEvent)
-    def event_handler(source, event):
-        received_events.append(event)
-
-    available_functions = {"failing_tool": failing_tool}
-
-    messages = [{"role": "user", "content": "Use the failing tool"}]
-
-    llm.call(
-        messages,
-        tools=[tool_schema],
-        available_functions=available_functions,
-    )
-
-    assert len(received_events) == 1
-    event = received_events[0]
-    assert isinstance(event, ToolExecutionErrorEvent)
-    assert event.tool_name == "failing_tool"
-    assert event.tool_args == {"param": "test"}
-    assert event.tool_class == failing_tool
-    assert "Tool execution failed!" in event.error
```
```diff
@@ -29,7 +29,7 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
     """Fixture to create a Mem0Storage instance with mocked dependencies"""
 
     # Patch the Memory class to return our mock
-    with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory):
+    with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory) as mock_from_config:
         config = {
             "vector_store": {
                 "provider": "mock_vector_store",
@@ -66,13 +66,15 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
         )
 
         mem0_storage = Mem0Storage(type="short_term", crew=crew)
-        return mem0_storage
+        return mem0_storage, mock_from_config, config
 
 
 def test_mem0_storage_initialization(mem0_storage_with_mocked_config, mock_mem0_memory):
     """Test that Mem0Storage initializes correctly with the mocked config"""
-    assert mem0_storage_with_mocked_config.memory_type == "short_term"
-    assert mem0_storage_with_mocked_config.memory is mock_mem0_memory
+    mem0_storage, mock_from_config, config = mem0_storage_with_mocked_config
+    assert mem0_storage.memory_type == "short_term"
+    assert mem0_storage.memory is mock_mem0_memory
+    mock_from_config.assert_called_once_with(config)
 
 
 @pytest.fixture
```
```diff
@@ -1,4 +1,8 @@
-from typing import Callable
+import asyncio
+import inspect
+import unittest
+from typing import Any, Callable, Dict, List
+from unittest.mock import patch
 
 from crewai.tools import BaseTool, tool
@@ -122,3 +126,69 @@ def test_result_as_answer_in_tool_decorator():
 
     converted_tool = my_tool_with_default.to_structured_tool()
     assert converted_tool.result_as_answer is False
+
+
+class SyncTool(BaseTool):
+    """Test implementation with a synchronous _run method"""
+    name: str = "sync_tool"
+    description: str = "A synchronous tool for testing"
+
+    def _run(self, input_text: str) -> str:
+        """Process input text synchronously."""
+        return f"Processed {input_text} synchronously"
+
+
+class AsyncTool(BaseTool):
+    """Test implementation with an asynchronous _run method"""
+    name: str = "async_tool"
+    description: str = "An asynchronous tool for testing"
+
+    async def _run(self, input_text: str) -> str:
+        """Process input text asynchronously."""
+        await asyncio.sleep(0.1)  # Simulate async operation
+        return f"Processed {input_text} asynchronously"
+
+
+def test_sync_run_returns_direct_result():
+    """Test that _run in a synchronous tool returns a direct result, not a coroutine."""
+    tool = SyncTool()
+    result = tool._run(input_text="hello")
+
+    assert not asyncio.iscoroutine(result)
+    assert result == "Processed hello synchronously"
+
+    run_result = tool.run(input_text="hello")
+    assert run_result == "Processed hello synchronously"
+
+
+def test_async_run_returns_coroutine():
+    """Test that _run in an asynchronous tool returns a coroutine object."""
+    tool = AsyncTool()
+    result = tool._run(input_text="hello")
+
+    assert asyncio.iscoroutine(result)
+    result.close()  # Clean up the coroutine
+
+
+def test_run_calls_asyncio_run_for_async_tools():
+    """Test that asyncio.run is called when using async tools."""
+    async_tool = AsyncTool()
+
+    with patch('asyncio.run') as mock_run:
+        mock_run.return_value = "Processed test asynchronously"
+        async_result = async_tool.run(input_text="test")
+
+    mock_run.assert_called_once()
+    assert async_result == "Processed test asynchronously"
+
+
+def test_run_does_not_call_asyncio_run_for_sync_tools():
+    """Test that asyncio.run is NOT called when using sync tools."""
+    sync_tool = SyncTool()
+
+    with patch('asyncio.run') as mock_run:
+        sync_result = sync_tool.run(input_text="test")
+
+    mock_run.assert_not_called()
+    assert sync_result == "Processed test synchronously"
```
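Taken together with the `BaseTool.run` change above, these tests mean an async tool needs no special handling at the call site. A minimal sketch, where the tool class and its behavior are illustrative:

```python
import asyncio

from crewai.tools import BaseTool

class FetchTool(BaseTool):
    """Illustrative tool with an asynchronous _run."""
    name: str = "fetch_tool"
    description: str = "Fetches a resource asynchronously"

    async def _run(self, url: str) -> str:
        await asyncio.sleep(0.1)  # stand-in for real async I/O
        return f"fetched {url}"

# run() detects the coroutine returned by _run and drives it with asyncio.run
print(FetchTool().run(url="https://example.com"))
```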