Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 16:48:30 +00:00
Merge branch 'main' into lorenze/trace-improvements-3
.github/workflows/tests.yml (vendored, 20 lines changed)
@@ -45,14 +45,34 @@ jobs:
       - name: Install the project
         run: uv sync --all-groups --all-extras
 
+      - name: Restore test durations
+        uses: actions/cache/restore@v4
+        with:
+          path: .test_durations_py*
+          key: test-durations-py${{ matrix.python-version }}-
+          restore-keys: |
+            test-durations-py${{ matrix.python-version }}-
+
       - name: Run tests (group ${{ matrix.group }} of 8)
         run: |
+          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
+          DURATION_FILE=".test_durations_py${PYTHON_VERSION_SAFE}"
+
+          if [ -f "$DURATION_FILE" ]; then
+            echo "Using cached test durations for optimal splitting"
+            DURATIONS_ARG="--durations-path=${DURATION_FILE}"
+          else
+            echo "No cached durations found, tests will be split evenly"
+            DURATIONS_ARG=""
+          fi
+
           uv run pytest \
             --block-network \
             --timeout=30 \
             -vv \
             --splits 8 \
             --group ${{ matrix.group }} \
+            $DURATIONS_ARG \
             --durations=10 \
             -n auto \
             --maxfail=3
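The run step feeds pytest-split (--splits/--group) with the cached durations file. As a minimal sketch of the allocation it performs, assuming the durations file is the JSON object mapping test ids to seconds that --store-durations writes (this is pytest-split's documented format, not code from this repo):

import json

def split_tests(durations_path: str, test_ids: list[str], splits: int) -> list[list[str]]:
    with open(durations_path) as f:
        durations: dict[str, float] = json.load(f)
    # Tests without recorded durations get an average weight so new tests still spread out.
    known = [durations[t] for t in test_ids if t in durations]
    default = sum(known) / len(known) if known else 1.0
    groups: list[list[str]] = [[] for _ in range(splits)]
    totals = [0.0] * splits
    # Greedy bin packing: heaviest test first, always into the currently lightest group.
    for test in sorted(test_ids, key=lambda t: durations.get(t, default), reverse=True):
        idx = totals.index(min(totals))
        groups[idx].append(test)
        totals[idx] += durations.get(test, default)
    return groups

Without the cached file the workflow falls back to DURATIONS_ARG="" and pytest-split divides the suite evenly by count instead of by measured time.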
.github/workflows/update-test-durations.yml (vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
name: Update Test Durations

on:
  push:
    branches:
      - main
    paths:
      - 'tests/**/*.py'
  workflow_dispatch:

permissions:
  contents: read

jobs:
  update-durations:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12', '3.13']
    env:
      OPENAI_API_KEY: fake-api-key
      PYTHONUNBUFFERED: 1

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Restore global uv cache
        id: cache-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
          restore-keys: |
            uv-main-py${{ matrix.python-version }}-

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: ${{ matrix.python-version }}
          enable-cache: false

      - name: Install the project
        run: uv sync --all-groups --all-extras

      - name: Run all tests and store durations
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          uv run pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
        continue-on-error: true

      - name: Save durations to cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: .test_durations_py*
          key: test-durations-py${{ matrix.python-version }}-${{ github.sha }}

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          path: |
            ~/.cache/uv
            ~/.local/share/uv
            .venv
          key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
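This workflow is the producer side of a cache handshake: it saves durations under a key ending in the commit SHA, while tests.yml restores by the version-scoped prefix. A sketch of the matching rule, assuming GitHub's documented cache behavior (exact key match first, otherwise the most recently saved entry whose key starts with a restore-keys prefix):

def pick_cache(saved_keys_newest_first: list[str], key: str, restore_keys: list[str]) -> str | None:
    if key in saved_keys_newest_first:
        return key
    for prefix in restore_keys:
        for candidate in saved_keys_newest_first:
            if candidate.startswith(prefix):
                return candidate
    return None

# The durations workflow saves "test-durations-py3.12-<sha>" on each push to main;
# the test workflow restores with prefix "test-durations-py3.12-", so PR runs pick
# up the newest durations snapshot from main.
print(pick_cache(
    ["test-durations-py3.12-abc123", "test-durations-py3.12-old456"],
    "test-durations-py3.12-",
    ["test-durations-py3.12-"],
))  # -> "test-durations-py3.12-abc123"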
pyproject.toml

@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
 Repository = "https://github.com/crewAIInc/crewAI"
 
 [project.optional-dependencies]
-tools = ["crewai-tools~=0.69.0"]
+tools = ["crewai-tools~=0.71.0"]
 embeddings = [
     "tiktoken~=0.8.0"
 ]
src/crewai/__init__.py

@@ -1,6 +1,21 @@
+import threading
+import urllib.request
 import warnings
 from typing import Any
 
+from crewai.agent import Agent
+from crewai.crew import Crew
+from crewai.crews.crew_output import CrewOutput
+from crewai.flow.flow import Flow
+from crewai.knowledge.knowledge import Knowledge
+from crewai.llm import LLM
+from crewai.llms.base_llm import BaseLLM
+from crewai.process import Process
+from crewai.task import Task
+from crewai.tasks.llm_guardrail import LLMGuardrail
+from crewai.tasks.task_output import TaskOutput
+from crewai.telemetry.telemetry import Telemetry
+
 
 def _suppress_pydantic_deprecation_warnings() -> None:
     """Suppress Pydantic deprecation warnings using targeted monkey patch."""
@@ -20,27 +35,12 @@ def _suppress_pydantic_deprecation_warnings() -> None:
                 return None
         return original_warn(message, category, stacklevel + 1, source)
 
-    setattr(warnings, "warn", filtered_warn)
+    warnings.warn = filtered_warn  # type: ignore[assignment]
 
 
 _suppress_pydantic_deprecation_warnings()
 
-import threading
-import urllib.request
-
-from crewai.agent import Agent
-from crewai.crew import Crew
-from crewai.crews.crew_output import CrewOutput
-from crewai.flow.flow import Flow
-from crewai.knowledge.knowledge import Knowledge
-from crewai.llm import LLM
-from crewai.llms.base_llm import BaseLLM
-from crewai.process import Process
-from crewai.task import Task
-from crewai.tasks.llm_guardrail import LLMGuardrail
-from crewai.tasks.task_output import TaskOutput
-from crewai.telemetry.telemetry import Telemetry
-
+__version__ = "0.186.1"
 _telemetry_submitted = False
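The only behavioral change in this hunk is swapping setattr() for a plain attribute assignment (with a type-ignore for the checker). A minimal, self-contained version of the underlying pattern; the Pydantic-flavored matching condition here is illustrative of the idea, not a copy of crewai's exact filter:

import warnings

def _suppress_targeted_warnings() -> None:
    original_warn = warnings.warn

    def filtered_warn(message, category=UserWarning, stacklevel=1, source=None):
        if category is not None and issubclass(category, DeprecationWarning):
            if "Pydantic" in str(message):
                return None  # drop just this family of warnings
        # Bump stacklevel so the warning still points at the real caller.
        return original_warn(message, category, stacklevel + 1, source)

    warnings.warn = filtered_warn  # plain assignment; setattr() added nothing here

_suppress_targeted_warnings()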
@@ -54,13 +54,12 @@ def _track_install() -> None:
     try:
         pixel_url = "https://api.scarf.sh/v2/packages/CrewAI/crewai/docs/00f2dad1-8334-4a39-934e-003b2e1146db"
 
-        req = urllib.request.Request(pixel_url)
+        req = urllib.request.Request(pixel_url)  # noqa: S310
         req.add_header("User-Agent", f"CrewAI-Python/{__version__}")
 
-        with urllib.request.urlopen(req, timeout=2):  # nosec B310
+        with urllib.request.urlopen(req, timeout=2):  # noqa: S310
            _telemetry_submitted = True
 
-    except Exception:
+    except Exception:  # noqa: S110
         pass
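_track_install() is launched from _track_install_async(), which this diff leaves untouched. A minimal sketch of that fire-and-forget shape, with a stand-in URL instead of the real Scarf pixel:

import threading
import urllib.request

PIXEL_URL = "https://example.com/install-pixel"  # stand-in, not the real endpoint

def _track_install() -> None:
    try:
        req = urllib.request.Request(PIXEL_URL)
        req.add_header("User-Agent", "CrewAI-Python/0.186.1")
        with urllib.request.urlopen(req, timeout=2):
            pass  # the request itself is the signal; the body is irrelevant
    except Exception:
        pass  # telemetry must never break an import

def _track_install_async() -> None:
    # daemon=True lets the interpreter exit even if the request is still pending
    threading.Thread(target=_track_install, daemon=True).start()

_track_install_async()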
@@ -72,19 +71,17 @@ def _track_install_async() -> None:
 
 
 _track_install_async()
 
-__version__ = "0.177.0"
 __all__ = [
-    "LLM",
     "Agent",
-    "BaseLLM",
     "Crew",
     "CrewOutput",
-    "Process",
-    "Task",
+    "LLM",
+    "BaseLLM",
     "Flow",
     "Knowledge",
-    "TaskOutput",
     "LLMGuardrail",
+    "Process",
+    "Task",
     "TaskOutput",
     "__version__",
 ]
@@ -1,47 +1,56 @@
-from typing import Any, Dict, List, Optional
+"""LangGraph agent adapter for CrewAI integration.
 
-from pydantic import Field, PrivateAttr
+This module contains the LangGraphAgentAdapter class that integrates LangGraph ReAct agents
+with CrewAI's agent system. Provides memory persistence, tool integration, and structured
+output functionality.
+"""
+
+from collections.abc import Callable
+from typing import Any, cast
+
+from pydantic import ConfigDict, Field, PrivateAttr
 
 from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
 from crewai.agents.agent_adapters.langgraph.langgraph_tool_adapter import (
     LangGraphToolAdapter,
 )
+from crewai.agents.agent_adapters.langgraph.protocols import (
+    LangGraphCheckPointMemoryModule,
+    LangGraphPrebuiltModule,
+)
 from crewai.agents.agent_adapters.langgraph.structured_output_converter import (
     LangGraphConverterAdapter,
 )
 from crewai.agents.agent_builder.base_agent import BaseAgent
-from crewai.tools.agent_tools.agent_tools import AgentTools
-from crewai.tools.base_tool import BaseTool
-from crewai.utilities import Logger
-from crewai.utilities.converter import Converter
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
 )
-
-try:
-    from langgraph.checkpoint.memory import MemorySaver
-    from langgraph.prebuilt import create_react_agent
-
-    LANGGRAPH_AVAILABLE = True
-except ImportError:
-    LANGGRAPH_AVAILABLE = False
+from crewai.tools.agent_tools.agent_tools import AgentTools
+from crewai.tools.base_tool import BaseTool
+from crewai.utilities import Logger
+from crewai.utilities.converter import Converter
+from crewai.utilities.import_utils import require
 
 
 class LangGraphAgentAdapter(BaseAgentAdapter):
-    """Adapter for LangGraph agents to work with CrewAI."""
+    """Adapter for LangGraph agents to work with CrewAI.
 
-    model_config = {"arbitrary_types_allowed": True}
+    This adapter integrates LangGraph's ReAct agents with CrewAI's agent system,
+    providing memory persistence, tool integration, and structured output support.
+    """
 
-    _logger: Logger = PrivateAttr(default_factory=lambda: Logger())
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    _logger: Logger = PrivateAttr(default_factory=Logger)
     _tool_adapter: LangGraphToolAdapter = PrivateAttr()
     _graph: Any = PrivateAttr(default=None)
     _memory: Any = PrivateAttr(default=None)
     _max_iterations: int = PrivateAttr(default=10)
     function_calling_llm: Any = Field(default=None)
-    step_callback: Any = Field(default=None)
+    step_callback: Callable[..., Any] | None = Field(default=None)
 
     model: str = Field(default="gpt-4o")
     verbose: bool = Field(default=False)
@@ -51,17 +60,24 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
         role: str,
         goal: str,
         backstory: str,
-        tools: Optional[List[BaseTool]] = None,
+        tools: list[BaseTool] | None = None,
         llm: Any = None,
         max_iterations: int = 10,
-        agent_config: Optional[Dict[str, Any]] = None,
+        agent_config: dict[str, Any] | None = None,
         **kwargs,
-    ):
-        """Initialize the LangGraph agent adapter."""
-        if not LANGGRAPH_AVAILABLE:
-            raise ImportError(
-                "LangGraph Agent Dependencies are not installed. Please install it using `uv add langchain-core langgraph`"
-            )
+    ) -> None:
+        """Initialize the LangGraph agent adapter.
+
+        Args:
+            role: The role description for the agent.
+            goal: The primary goal the agent should achieve.
+            backstory: Background information about the agent.
+            tools: Optional list of tools available to the agent.
+            llm: Language model to use, defaults to gpt-4o.
+            max_iterations: Maximum number of iterations for task execution.
+            agent_config: Additional configuration for the LangGraph agent.
+            **kwargs: Additional arguments passed to the base adapter.
+        """
         super().__init__(
             role=role,
             goal=goal,
@@ -72,46 +88,65 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             **kwargs,
         )
         self._tool_adapter = LangGraphToolAdapter(tools=tools)
-        self._converter_adapter = LangGraphConverterAdapter(self)
+        self._converter_adapter: LangGraphConverterAdapter = LangGraphConverterAdapter(
+            self
+        )
         self._max_iterations = max_iterations
         self._setup_graph()
 
     def _setup_graph(self) -> None:
-        """Set up the LangGraph workflow graph."""
-        try:
-            self._memory = MemorySaver()
-
-            converted_tools: List[Any] = self._tool_adapter.tools()
-            if self._agent_config:
-                self._graph = create_react_agent(
-                    model=self.llm,
-                    tools=converted_tools,
-                    checkpointer=self._memory,
-                    debug=self.verbose,
-                    **self._agent_config,
-                )
-            else:
-                self._graph = create_react_agent(
-                    model=self.llm,
-                    tools=converted_tools or [],
-                    checkpointer=self._memory,
-                    debug=self.verbose,
-                )
-
-        except ImportError as e:
-            self._logger.log(
-                "error", f"Failed to import LangGraph dependencies: {str(e)}"
-            )
-            raise
-        except Exception as e:
-            self._logger.log("error", f"Error setting up LangGraph agent: {str(e)}")
-            raise
+        """Set up the LangGraph workflow graph.
+
+        Initializes the memory saver and creates a ReAct agent with the configured
+        tools, memory checkpointer, and debug settings.
+        """
+        memory_saver: type[Any] = cast(
+            LangGraphCheckPointMemoryModule,
+            require(
+                "langgraph.checkpoint.memory",
+                purpose="LangGraph core functionality",
+            ),
+        ).MemorySaver
+        create_react_agent: Callable[..., Any] = cast(
+            LangGraphPrebuiltModule,
+            require(
+                "langgraph.prebuilt",
+                purpose="LangGraph core functionality",
+            ),
+        ).create_react_agent
+
+        self._memory = memory_saver()
+
+        converted_tools: list[Any] = self._tool_adapter.tools()
+        if self._agent_config:
+            self._graph = create_react_agent(
+                model=self.llm,
+                tools=converted_tools,
+                checkpointer=self._memory,
+                debug=self.verbose,
+                **self._agent_config,
+            )
+        else:
+            self._graph = create_react_agent(
+                model=self.llm,
+                tools=converted_tools or [],
+                checkpointer=self._memory,
+                debug=self.verbose,
+            )
 
     def _build_system_prompt(self) -> str:
-        """Build a system prompt for the LangGraph agent."""
+        """Build a system prompt for the LangGraph agent.
+
+        Creates a prompt that includes the agent's role, goal, and backstory,
+        then enhances it through the converter adapter for structured output.
+
+        Returns:
+            The complete system prompt string.
+        """
         base_prompt = f"""
         You are {self.role}.
 
         Your goal is: {self.goal}
 
         Your backstory: {self.backstory}
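The module-level try/except ImportError flag gives way to require(), which defers the import until _setup_graph() actually runs. A minimal version of that helper, inferred from its call sites in this diff; crewai's real implementation in crewai.utilities.import_utils may differ:

import importlib
from types import ModuleType

def require(name: str, *, purpose: str) -> ModuleType:
    """Import a module at use time, failing with an actionable message."""
    try:
        return importlib.import_module(name)
    except ImportError as e:
        raise ImportError(
            f"Module '{name}' is required for {purpose} but is not installed. "
            f"Install the optional dependency, e.g. `uv add langgraph`."
        ) from e

# Usage mirrors the adapter: resolve the module lazily, then pull the symbol.
# MemorySaver = require("langgraph.checkpoint.memory", purpose="LangGraph core functionality").MemorySaver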
@@ -123,10 +158,25 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
     def execute_task(
         self,
         task: Any,
-        context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        context: str | None = None,
+        tools: list[BaseTool] | None = None,
     ) -> str:
-        """Execute a task using the LangGraph workflow."""
+        """Execute a task using the LangGraph workflow.
+
+        Configures the agent, processes the task through the LangGraph workflow,
+        and handles event emission for execution tracking.
+
+        Args:
+            task: The task object to execute.
+            context: Optional context information for the task.
+            tools: Optional additional tools for this specific execution.
+
+        Returns:
+            The final answer from the task execution.
+
+        Raises:
+            Exception: If task execution fails.
+        """
         self.create_agent_executor(tools)
 
         self.configure_structured_output(task)
@@ -151,9 +201,11 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
 
             session_id = f"task_{id(task)}"
 
-            config = {"configurable": {"thread_id": session_id}}
+            config: dict[str, dict[str, str]] = {
+                "configurable": {"thread_id": session_id}
+            }
 
-            result = self._graph.invoke(
+            result: dict[str, Any] = self._graph.invoke(
                 {
                     "messages": [
                         ("system", self._build_system_prompt()),
@@ -163,10 +215,10 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
                 config,
             )
 
-            messages = result.get("messages", [])
-            last_message = messages[-1] if messages else None
+            messages: list[Any] = result.get("messages", [])
+            last_message: Any = messages[-1] if messages else None
 
-            final_answer = ""
+            final_answer: str = ""
             if isinstance(last_message, dict):
                 final_answer = last_message.get("content", "")
             elif hasattr(last_message, "content"):
@@ -186,7 +238,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             return final_answer
 
         except Exception as e:
-            self._logger.log("error", f"Error executing LangGraph task: {str(e)}")
+            self._logger.log("error", f"Error executing LangGraph task: {e!s}")
             crewai_event_bus.emit(
                 self,
                 event=AgentExecutionErrorEvent(
@@ -197,29 +249,67 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             )
             raise
 
-    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """Configure the LangGraph agent for execution."""
+    def create_agent_executor(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure the LangGraph agent for execution.
+
+        Args:
+            tools: Optional tools to configure for the agent.
+        """
         self.configure_tools(tools)
 
-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """Configure tools for the LangGraph agent."""
+    def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure tools for the LangGraph agent.
+
+        Merges additional tools with existing ones and updates the graph's
+        available tools through the tool adapter.
+
+        Args:
+            tools: Optional additional tools to configure.
+        """
         if tools:
-            all_tools = list(self.tools or []) + list(tools or [])
+            all_tools: list[BaseTool] = list(self.tools or []) + list(tools or [])
             self._tool_adapter.configure_tools(all_tools)
-            available_tools = self._tool_adapter.tools()
+            available_tools: list[Any] = self._tool_adapter.tools()
             self._graph.tools = available_tools
 
-    def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
-        """Implement delegation tools support for LangGraph."""
-        agent_tools = AgentTools(agents=agents)
+    def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
+        """Implement delegation tools support for LangGraph.
+
+        Creates delegation tools that allow this agent to delegate tasks to other agents.
+
+        Args:
+            agents: List of agents available for delegation.
+
+        Returns:
+            List of delegation tools.
+        """
+        agent_tools: AgentTools = AgentTools(agents=agents)
         return agent_tools.tools()
 
+    @staticmethod
     def get_output_converter(
-        self, llm: Any, text: str, model: Any, instructions: str
-    ) -> Any:
-        """Convert output format if needed."""
+        llm: Any, text: str, model: Any, instructions: str
+    ) -> Converter:
+        """Convert output format if needed.
+
+        Args:
+            llm: Language model instance.
+            text: Text to convert.
+            model: Model configuration.
+            instructions: Conversion instructions.
+
+        Returns:
+            Converter instance for output transformation.
+        """
         return Converter(llm=llm, text=text, model=model, instructions=instructions)
 
-    def configure_structured_output(self, task) -> None:
-        """Configure the structured output for LangGraph."""
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for LangGraph.
+
+        Uses the converter adapter to set up structured output formatting
+        based on the task requirements.
+
+        Args:
+            task: Task object containing output requirements.
+        """
         self._converter_adapter.configure_structured_output(task)
src/crewai/agents/agent_adapters/langgraph/langgraph_tool_adapter.py

@@ -1,38 +1,72 @@
+"""LangGraph tool adapter for CrewAI tool integration.
+
+This module contains the LangGraphToolAdapter class that converts CrewAI tools
+to LangGraph-compatible format using langchain_core.tools.
+"""
+
 import inspect
-from typing import Any, List, Optional
+from collections.abc import Awaitable
+from typing import Any
 
 from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
 from crewai.tools.base_tool import BaseTool
 
 
 class LangGraphToolAdapter(BaseToolAdapter):
-    """Adapts CrewAI tools to LangGraph agent tool compatible format"""
+    """Adapts CrewAI tools to LangGraph agent tool compatible format.
 
-    def __init__(self, tools: Optional[List[BaseTool]] = None):
-        self.original_tools = tools or []
-        self.converted_tools = []
+    Converts CrewAI BaseTool instances to langchain_core.tools format
+    that can be used by LangGraph agents.
+    """
 
-    def configure_tools(self, tools: List[BaseTool]) -> None:
-        """
-        Configure and convert CrewAI tools to LangGraph-compatible format.
-        LangGraph expects tools in langchain_core.tools format.
-        """
-        from langchain_core.tools import BaseTool, StructuredTool
+    def __init__(self, tools: list[BaseTool] | None = None) -> None:
+        """Initialize the tool adapter.
+
+        Args:
+            tools: Optional list of CrewAI tools to adapt.
+        """
+        super().__init__()
+        self.original_tools: list[BaseTool] = tools or []
+        self.converted_tools: list[Any] = []
 
-        converted_tools = []
+    def configure_tools(self, tools: list[BaseTool]) -> None:
+        """Configure and convert CrewAI tools to LangGraph-compatible format.
+
+        LangGraph expects tools in langchain_core.tools format. This method
+        converts CrewAI BaseTool instances to StructuredTool instances.
+
+        Args:
+            tools: List of CrewAI tools to convert.
+        """
+        from langchain_core.tools import BaseTool as LangChainBaseTool
+        from langchain_core.tools import StructuredTool
+
+        converted_tools: list[Any] = []
         if self.original_tools:
-            all_tools = tools + self.original_tools
+            all_tools: list[BaseTool] = tools + self.original_tools
         else:
             all_tools = tools
         for tool in all_tools:
-            if isinstance(tool, BaseTool):
+            if isinstance(tool, LangChainBaseTool):
                 converted_tools.append(tool)
                 continue
 
-            sanitized_name = self.sanitize_tool_name(tool.name)
+            sanitized_name: str = self.sanitize_tool_name(tool.name)
 
-            async def tool_wrapper(*args, tool=tool, **kwargs):
-                output = None
+            async def tool_wrapper(
+                *args: Any, tool: BaseTool = tool, **kwargs: Any
+            ) -> Any:
+                """Wrapper function to adapt CrewAI tool calls to LangGraph format.
+
+                Args:
+                    *args: Positional arguments for the tool.
+                    tool: The CrewAI tool to wrap.
+                    **kwargs: Keyword arguments for the tool.
+
+                Returns:
+                    The result from the tool execution.
+                """
+                output: Any | Awaitable[Any]
                 if len(args) > 0 and isinstance(args[0], str):
                     output = tool.run(args[0])
                 elif "input" in kwargs:
@@ -41,12 +75,12 @@ class LangGraphToolAdapter(BaseToolAdapter):
                     output = tool.run(**kwargs)
 
                 if inspect.isawaitable(output):
-                    result = await output
+                    result: Any = await output
                 else:
                     result = output
                 return result
 
-            converted_tool = StructuredTool(
+            converted_tool: StructuredTool = StructuredTool(
                 name=sanitized_name,
                 description=tool.description,
                 func=tool_wrapper,
@@ -57,5 +91,10 @@ class LangGraphToolAdapter(BaseToolAdapter):
 
         self.converted_tools = converted_tools
 
-    def tools(self) -> List[Any]:
+    def tools(self) -> list[Any]:
+        """Get the list of converted tools.
+
+        Returns:
+            List of LangGraph-compatible tools.
+        """
         return self.converted_tools or []
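tool_wrapper above does two things worth seeing in isolation: it pins the loop variable with the tool=tool default-argument trick, and it bridges sync and async tools via inspect.isawaitable. A standalone sketch of that bridging; EchoTool is a made-up stand-in for a CrewAI BaseTool:

import asyncio
import inspect
from typing import Any

class EchoTool:
    name = "echo"
    def run(self, text: str) -> str:          # sync tools return a plain value...
        return f"echo: {text}"

def make_wrapper(tool: Any):
    async def tool_wrapper(*args: Any, **kwargs: Any) -> Any:
        output = tool.run(*args, **kwargs)
        # ...but async tools return an awaitable, so await only when needed.
        if inspect.isawaitable(output):
            return await output
        return output
    return tool_wrapper

print(asyncio.run(make_wrapper(EchoTool())("hi")))  # -> "echo: hi"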
src/crewai/agents/agent_adapters/langgraph/protocols.py (new file, 55 lines)
@@ -0,0 +1,55 @@
"""Type protocols for LangGraph modules."""

from typing import Any, Protocol, runtime_checkable


@runtime_checkable
class LangGraphMemorySaver(Protocol):
    """Protocol for LangGraph MemorySaver.

    Defines the interface for LangGraph's memory persistence mechanism.
    """

    def __init__(self) -> None:
        """Initialize the memory saver."""
        ...


@runtime_checkable
class LangGraphCheckPointMemoryModule(Protocol):
    """Protocol for LangGraph checkpoint memory module.

    Defines the interface for modules containing memory checkpoint functionality.
    """

    MemorySaver: type[LangGraphMemorySaver]


@runtime_checkable
class LangGraphPrebuiltModule(Protocol):
    """Protocol for LangGraph prebuilt module.

    Defines the interface for modules containing prebuilt agent factories.
    """

    def create_react_agent(
        self,
        model: Any,
        tools: list[Any],
        checkpointer: Any,
        debug: bool = False,
        **kwargs: Any,
    ) -> Any:
        """Create a ReAct agent with the given configuration.

        Args:
            model: The language model to use for the agent.
            tools: List of tools available to the agent.
            checkpointer: Memory checkpointer for state persistence.
            debug: Whether to enable debug mode.
            **kwargs: Additional configuration options.

        Returns:
            The configured ReAct agent instance.
        """
        ...
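These protocols exist so the dynamically imported langgraph modules can be cast to a typed surface: the import stays lazy while the type checker still sees real attributes. The same pattern in miniature, using the stdlib json module so it runs anywhere:

import importlib
from typing import Protocol, cast

class JsonModule(Protocol):
    def dumps(self, obj: object) -> str: ...

def load_json_module() -> JsonModule:
    # importlib returns a bare ModuleType; the cast tells the checker which
    # surface we rely on, exactly as the adapter does with require() results.
    return cast(JsonModule, importlib.import_module("json"))

print(load_json_module().dumps({"ok": True}))  # -> '{"ok": true}'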
src/crewai/agents/agent_adapters/langgraph/structured_output_converter.py

@@ -1,21 +1,45 @@
+"""LangGraph structured output converter for CrewAI task integration.
+
+This module contains the LangGraphConverterAdapter class that handles structured
+output conversion for LangGraph agents, supporting JSON and Pydantic model formats.
+"""
+
 import json
+import re
+from typing import Any, Literal
 
 from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
 from crewai.utilities.converter import generate_model_description
 
 
 class LangGraphConverterAdapter(BaseConverterAdapter):
-    """Adapter for handling structured output conversion in LangGraph agents"""
+    """Adapter for handling structured output conversion in LangGraph agents.
 
-    def __init__(self, agent_adapter):
-        """Initialize the converter adapter with a reference to the agent adapter"""
-        self.agent_adapter = agent_adapter
-        self._output_format = None
-        self._schema = None
-        self._system_prompt_appendix = None
+    Converts task output requirements into system prompt modifications and
+    post-processing logic to ensure agents return properly structured outputs.
+    """
 
-    def configure_structured_output(self, task) -> None:
-        """Configure the structured output for LangGraph."""
+    def __init__(self, agent_adapter: Any) -> None:
+        """Initialize the converter adapter with a reference to the agent adapter.
+
+        Args:
+            agent_adapter: The LangGraph agent adapter instance.
+        """
+        super().__init__(agent_adapter=agent_adapter)
+        self.agent_adapter: Any = agent_adapter
+        self._output_format: Literal["json", "pydantic"] | None = None
+        self._schema: str | None = None
+        self._system_prompt_appendix: str | None = None
+
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for LangGraph.
+
+        Analyzes the task's output requirements and sets up the necessary
+        formatting and validation logic.
+
+        Args:
+            task: The task object containing output format specifications.
+        """
         if not (task.output_json or task.output_pydantic):
             self._output_format = None
             self._schema = None
@@ -32,7 +56,14 @@ class LangGraphConverterAdapter(BaseConverterAdapter):
         self._system_prompt_appendix = self._generate_system_prompt_appendix()
 
     def _generate_system_prompt_appendix(self) -> str:
-        """Generate an appendix for the system prompt to enforce structured output"""
+        """Generate an appendix for the system prompt to enforce structured output.
+
+        Creates instructions that are appended to the system prompt to guide
+        the agent in producing properly formatted output.
+
+        Returns:
+            System prompt appendix string, or empty string if no structured output.
+        """
         if not self._output_format or not self._schema:
             return ""
@@ -41,19 +72,36 @@ Important: Your final answer MUST be provided in the following structured format:
 
 {self._schema}
 
-DO NOT include any markdown code blocks, backticks, or other formatting around your response.
+DO NOT include any markdown code blocks, backticks, or other formatting around your response.
 The output should be raw JSON that exactly matches the specified schema.
 """
 
     def enhance_system_prompt(self, original_prompt: str) -> str:
-        """Add structured output instructions to the system prompt if needed"""
+        """Add structured output instructions to the system prompt if needed.
+
+        Args:
+            original_prompt: The base system prompt.
+
+        Returns:
+            Enhanced system prompt with structured output instructions.
+        """
         if not self._system_prompt_appendix:
             return original_prompt
 
         return f"{original_prompt}\n{self._system_prompt_appendix}"
 
    def post_process_result(self, result: str) -> str:
-        """Post-process the result to ensure it matches the expected format"""
+        """Post-process the result to ensure it matches the expected format.
+
+        Attempts to extract and validate JSON content from agent responses,
+        handling cases where JSON may be wrapped in markdown or other formatting.
+
+        Args:
+            result: The raw result string from the agent.
+
+        Returns:
+            Processed result string, ideally in valid JSON format.
+        """
         if not self._output_format:
             return result
@@ -65,16 +113,16 @@ The output should be raw JSON that exactly matches the specified schema.
                 return result
             except json.JSONDecodeError:
                 # Try to extract JSON from the text
-                import re
-
-                json_match = re.search(r"(\{.*\})", result, re.DOTALL)
+                json_match: re.Match[str] | None = re.search(
+                    r"(\{.*})", result, re.DOTALL
+                )
                 if json_match:
                     try:
-                        extracted = json_match.group(1)
+                        extracted: str = json_match.group(1)
                         # Validate it's proper JSON
                         json.loads(extracted)
                         return extracted
-                    except:
+                    except json.JSONDecodeError:
                         pass
 
         return result
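post_process_result's fallback path is easiest to see in isolation: parse the whole string first, otherwise grab the outermost brace span and validate it before returning. A standalone sketch of just that logic:

import json
import re

def extract_json(result: str) -> str:
    try:
        json.loads(result)
        return result  # already valid JSON
    except json.JSONDecodeError:
        match = re.search(r"(\{.*})", result, re.DOTALL)  # greedy: outermost braces
        if match:
            candidate = match.group(1)
            try:
                json.loads(candidate)  # validate before returning
                return candidate
            except json.JSONDecodeError:
                pass
    return result

print(extract_json('Here you go:\n```json\n{"answer": 42}\n``` hope that helps'))
# -> '{"answer": 42}'

Replacing the bare except with except json.JSONDecodeError also stops the old code from swallowing unrelated errors such as KeyboardInterrupt.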
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.177.0,<1.0.0"
+    "crewai[tools]>=0.186.1,<1.0.0"
 ]
 
 [project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.177.0,<1.0.0",
+    "crewai[tools]>=0.186.1,<1.0.0",
 ]
 
 [project.scripts]
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<3.14"
 dependencies = [
-    "crewai[tools]>=0.177.0"
+    "crewai[tools]>=0.186.1"
 ]
 
 [tool.crewai]
@@ -1,26 +1,22 @@
 from __future__ import annotations
 
 import asyncio
-
 import inspect
 import textwrap
-from typing import Any, Callable, Optional, Union, get_type_hints
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, get_type_hints
 
 from pydantic import BaseModel, Field, create_model
 
 from crewai.utilities.logger import Logger
 
-from typing import TYPE_CHECKING
-
 if TYPE_CHECKING:
     from crewai.tools.base_tool import BaseTool
 
 
-class ToolUsageLimitExceeded(Exception):
+class ToolUsageLimitExceededError(Exception):
     """Exception raised when a tool has reached its maximum usage limit."""
 
-    pass
-
 
 class CrewStructuredTool:
     """A structured tool that can operate on any number of inputs.
@@ -69,10 +65,10 @@ class CrewStructuredTool:
     def from_function(
         cls,
         func: Callable,
-        name: Optional[str] = None,
-        description: Optional[str] = None,
+        name: str | None = None,
+        description: str | None = None,
         return_direct: bool = False,
-        args_schema: Optional[type[BaseModel]] = None,
+        args_schema: type[BaseModel] | None = None,
         infer_schema: bool = True,
         **kwargs: Any,
     ) -> CrewStructuredTool:
@@ -164,7 +160,7 @@ class CrewStructuredTool:
 
         # Create model
         schema_name = f"{name.title()}Schema"
-        return create_model(schema_name, **fields)
+        return create_model(schema_name, **fields)  # type: ignore[call-overload]
 
     def _validate_function_signature(self) -> None:
         """Validate that the function signature matches the args schema."""
@@ -192,7 +188,7 @@ class CrewStructuredTool:
                     f"not found in args_schema"
                 )
 
-    def _parse_args(self, raw_args: Union[str, dict]) -> dict:
+    def _parse_args(self, raw_args: str | dict) -> dict:
         """Parse and validate the input arguments against the schema.
 
         Args:
@@ -207,18 +203,18 @@ class CrewStructuredTool:
 
                 raw_args = json.loads(raw_args)
             except json.JSONDecodeError as e:
-                raise ValueError(f"Failed to parse arguments as JSON: {e}")
+                raise ValueError(f"Failed to parse arguments as JSON: {e}") from e
 
         try:
             validated_args = self.args_schema.model_validate(raw_args)
             return validated_args.model_dump()
         except Exception as e:
-            raise ValueError(f"Arguments validation failed: {e}")
+            raise ValueError(f"Arguments validation failed: {e}") from e
 
     async def ainvoke(
         self,
-        input: Union[str, dict],
-        config: Optional[dict] = None,
+        input: str | dict,
+        config: dict | None = None,
         **kwargs: Any,
     ) -> Any:
         """Asynchronously invoke the tool.
@@ -234,7 +230,7 @@ class CrewStructuredTool:
         parsed_args = self._parse_args(input)
 
         if self.has_reached_max_usage_count():
-            raise ToolUsageLimitExceeded(
+            raise ToolUsageLimitExceededError(
                 f"Tool '{self.name}' has reached its maximum usage limit of {self.max_usage_count}. You should not use the {self.name} tool again."
             )
@@ -243,44 +239,37 @@ class CrewStructuredTool:
         try:
             if inspect.iscoroutinefunction(self.func):
                 return await self.func(**parsed_args, **kwargs)
-            else:
-                # Run sync functions in a thread pool
-                import asyncio
+            # Run sync functions in a thread pool
+            import asyncio
 
-                return await asyncio.get_event_loop().run_in_executor(
-                    None, lambda: self.func(**parsed_args, **kwargs)
-                )
+            return await asyncio.get_event_loop().run_in_executor(
+                None, lambda: self.func(**parsed_args, **kwargs)
+            )
         except Exception:
             raise
 
     def _run(self, *args, **kwargs) -> Any:
         """Legacy method for compatibility."""
         # Convert args/kwargs to our expected format
-        input_dict = dict(zip(self.args_schema.model_fields.keys(), args))
+        input_dict = dict(zip(self.args_schema.model_fields.keys(), args, strict=False))
         input_dict.update(kwargs)
         return self.invoke(input_dict)
 
     def invoke(
-        self, input: Union[str, dict], config: Optional[dict] = None, **kwargs: Any
+        self, input: str | dict, config: dict | None = None, **kwargs: Any
     ) -> Any:
         """Main method for tool execution."""
         parsed_args = self._parse_args(input)
 
         if self.has_reached_max_usage_count():
-            raise ToolUsageLimitExceeded(
+            raise ToolUsageLimitExceededError(
                 f"Tool '{self.name}' has reached its maximum usage limit of {self.max_usage_count}. You should not use the {self.name} tool again."
             )
 
         self._increment_usage_count()
 
         if inspect.iscoroutinefunction(self.func):
-            result = asyncio.run(self.func(**parsed_args, **kwargs))
-            return result
+            return asyncio.run(self.func(**parsed_args, **kwargs))
 
-        try:
-            result = self.func(**parsed_args, **kwargs)
-        except Exception:
-            raise
+        result = self.func(**parsed_args, **kwargs)
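The invoke() cleanup keeps one subtle constraint: asyncio.run() builds and tears down a fresh event loop, so the sync entry point must not itself be called from inside a running loop (that path belongs to ainvoke). A minimal sketch of the dispatch:

import asyncio
import inspect
from typing import Any, Callable

def invoke(func: Callable[..., Any], **kwargs: Any) -> Any:
    if inspect.iscoroutinefunction(func):
        # Coroutine functions get their own short-lived event loop.
        return asyncio.run(func(**kwargs))
    # Plain functions are called directly; the removed try/except around this
    # call only re-raised, so dropping it changes nothing observable.
    return func(**kwargs)

async def async_tool(x: int) -> int:
    return x * 2

def sync_tool(x: int) -> int:
    return x + 1

print(invoke(async_tool, x=21))  # -> 42
print(invoke(sync_tool, x=41))   # -> 42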
@@ -330,4 +330,222 @@ interactions:
     status:
       code: 200
       message: OK
+- request:
+    body: '{"input": ["Capital of France"], "model": "text-embedding-3-small", "encoding_format":
+      "base64"}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate, zstd
+      connection:
+      - keep-alive
+      content-length:
+      - '96'
+      content-type:
+      - application/json
+      cookie:
+      - _cfuvid=rvDDZbBWaissP0luvtyuyyAWcPx3AiaoZS9LkAuK4sM-1746636999152-0.0.1.1-604800000
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.93.0
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.93.0
+      x-stainless-read-timeout:
+      - '600'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.9
+    method: POST
+    uri: https://api.openai.com/v1/embeddings
+  response:
+    body:
+      string: !!binary |
+        [base64-encoded gzipped JSON payload (the recorded embedding response) not reproduced here]
+    headers:
+      CF-RAY:
+      - 97d174615cfef96b-SJC
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Wed, 10 Sep 2025 19:50:30 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=eYh.U8kiOc9xS0U2L8g4MiopA6w9E7lUuodx4D.rMOU-1757533830-1.0.1.1-YO2od1GbrHRgwOEdJSw3gCcNy8XFBF_O.jT_f8F2z6dWZsBIS7XPLWUpJAzenthO1wXRkx7OZDmVrPCPro2sSj1srJCxCY8KgIwcjw5NWGU;
+        path=/; expires=Wed, 10-Sep-25 20:20:30 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=vkbBikeJy.dDV.o7ZB2HjcJaD_hkp9dDeCEBfHZxG94-1757533830280-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-allow-origin:
+      - '*'
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
+      openai-model:
+      - text-embedding-3-small
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '172'
+      openai-project:
+      - proj_xitITlrFeen7zjNSzML82h9x
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      via:
+      - envoy-router-59c745856-z5gxd
+      x-envoy-upstream-service-time:
+      - '267'
+      x-openai-proxy-wasm:
+      - v0.1
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '10000000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '9999996'
+      x-ratelimit-reset-requests:
+      - 6ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_06f3f9465f1a4af0ae5a4d8a58f19321
+    status:
+      code: 200
+      message: OK
 version: 1
@@ -1,11 +1,11 @@
 """Test Agent creation and execution basic functionality."""
 
 import hashlib
 import json
+from collections import defaultdict
 from concurrent.futures import Future
 from hashlib import md5
 from unittest import mock
 from unittest.mock import ANY, MagicMock, patch
-from collections import defaultdict
 
 import pydantic_core
 import pytest
@@ -14,11 +14,29 @@ from crewai.agent import Agent
 from crewai.agents import CacheHandler
 from crewai.crew import Crew
 from crewai.crews.crew_output import CrewOutput
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.crew_events import (
+    CrewTestCompletedEvent,
+    CrewTestStartedEvent,
+    CrewTrainCompletedEvent,
+    CrewTrainStartedEvent,
+)
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemoryQueryFailedEvent,
+    MemoryQueryStartedEvent,
+    MemoryRetrievalCompletedEvent,
+    MemoryRetrievalStartedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveFailedEvent,
+    MemorySaveStartedEvent,
+)
 from crewai.flow import Flow, start
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
 from crewai.llm import LLM
 from crewai.memory.contextual.contextual_memory import ContextualMemory
+from crewai.memory.external.external_memory import ExternalMemory
 from crewai.memory.long_term.long_term_memory import LongTermMemory
 from crewai.memory.short_term.short_term_memory import ShortTermMemory
 from crewai.process import Process
@@ -27,28 +45,9 @@ from crewai.tasks.conditional_task import ConditionalTask
 from crewai.tasks.output_format import OutputFormat
 from crewai.tasks.task_output import TaskOutput
 from crewai.types.usage_metrics import UsageMetrics
-from crewai.events.event_bus import crewai_event_bus
-from crewai.events.types.crew_events import (
-    CrewTestCompletedEvent,
-    CrewTestStartedEvent,
-    CrewTrainCompletedEvent,
-    CrewTrainStartedEvent,
-)
 from crewai.utilities.rpm_controller import RPMController
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 
-from crewai.events.types.memory_events import (
-    MemorySaveStartedEvent,
-    MemorySaveCompletedEvent,
-    MemorySaveFailedEvent,
-    MemoryQueryStartedEvent,
-    MemoryQueryCompletedEvent,
-    MemoryQueryFailedEvent,
-    MemoryRetrievalStartedEvent,
-    MemoryRetrievalCompletedEvent,
-)
-from crewai.memory.external.external_memory import ExternalMemory
 
 
 @pytest.fixture
 def ceo():
@@ -364,7 +363,7 @@ def test_hierarchical_process(researcher, writer):
|
||||
|
||||
assert (
|
||||
result.raw
|
||||
== "1. **The Rise of Autonomous AI Agents in Daily Life** \n As artificial intelligence technology progresses, the integration of autonomous AI agents into everyday life becomes increasingly prominent. These agents, capable of making decisions without human intervention, are reshaping industries from healthcare to finance. Exploring case studies where autonomous AI has successfully decreased operational costs or improved efficiency can reveal not only the benefits but also the ethical implications of delegating decision-making to machines. This topic offers an exciting opportunity to dive into the AI landscape, showcasing current developments such as AI assistants and autonomous vehicles.\n\n2. **Ethical Implications of Generative AI in Creative Industries** \n The surge of generative AI tools in creative fields, such as art, music, and writing, has sparked a heated debate about authorship and originality. This article could investigate how these tools are being used by artists and creators, examining both the potential for innovation and the risk of devaluing traditional art forms. Highlighting perspectives from creators, legal experts, and ethicists could provide a comprehensive overview of the challenges faced, including copyright concerns and the emotional impact on human artists. This discussion is vital as the creative landscape evolves alongside technological advancements, making it ripe for exploration.\n\n3. **AI in Climate Change Mitigation: Current Solutions and Future Potential** \n As the world grapples with climate change, AI technology is increasingly being harnessed to develop innovative solutions for sustainability. From predictive analytics that optimize energy consumption to machine learning algorithms that improve carbon capture methods, AI's potential in environmental science is vast. This topic invites an exploration of existing AI applications in climate initiatives, with a focus on groundbreaking research and initiatives aimed at reducing humanity's carbon footprint. Highlighting successful projects and technology partnerships can illustrate the positive impact AI can have on global climate efforts, inspiring further exploration and investment in this area.\n\n4. **The Future of Work: How AI is Reshaping Employment Landscapes** \n The discussions around AI's impact on the workforce are both urgent and complex, as advances in automation and machine learning continue to transform the job market. This article could delve into the current trends of AI-driven job displacement alongside opportunities for upskilling and the creation of new job roles. By examining case studies of companies that integrate AI effectively and the resulting workforce adaptations, readers can gain valuable insights into preparing for a future where humans and AI collaborate. This exploration highlights the importance of policies that promote workforce resilience in the face of change.\n\n5. **Decentralized AI: Exploring the Role of Blockchain in AI Development** \n As blockchain technology sweeps through various sectors, its application in AI development presents a fascinating topic worth examining. Decentralized AI could address issues of data privacy, security, and democratization in AI models by allowing users to retain ownership of data while benefiting from AI's capabilities. This article could analyze how decentralized networks are disrupting traditional AI development models, featuring innovative projects that harness the synergy between blockchain and AI. 
Highlighting potential pitfalls and the future landscape of decentralized AI could stimulate discussion among technologists, entrepreneurs, and policymakers alike."
|
||||
== "**1. The Rise of Autonomous AI Agents in Daily Life** \nAs artificial intelligence technology progresses, the integration of autonomous AI agents into everyday life becomes increasingly prominent. These agents, capable of making decisions without human intervention, are reshaping industries from healthcare to finance. Exploring case studies where autonomous AI has successfully decreased operational costs or improved efficiency can reveal not only the benefits but also the ethical implications of delegating decision-making to machines. This topic offers an exciting opportunity to dive into the AI landscape, showcasing current developments such as AI assistants and autonomous vehicles.\n\n**2. Ethical Implications of Generative AI in Creative Industries** \nThe surge of generative AI tools in creative fields, such as art, music, and writing, has sparked a heated debate about authorship and originality. This article could investigate how these tools are being used by artists and creators, examining both the potential for innovation and the risk of devaluing traditional art forms. Highlighting perspectives from creators, legal experts, and ethicists could provide a comprehensive overview of the challenges faced, including copyright concerns and the emotional impact on human artists. This discussion is vital as the creative landscape evolves alongside technological advancements, making it ripe for exploration.\n\n**3. AI in Climate Change Mitigation: Current Solutions and Future Potential** \nAs the world grapples with climate change, AI technology is increasingly being harnessed to develop innovative solutions for sustainability. From predictive analytics that optimize energy consumption to machine learning algorithms that improve carbon capture methods, AI's potential in environmental science is vast. This topic invites an exploration of existing AI applications in climate initiatives, with a focus on groundbreaking research and initiatives aimed at reducing humanity's carbon footprint. Highlighting successful projects and technology partnerships can illustrate the positive impact AI can have on global climate efforts, inspiring further exploration and investment in this area.\n\n**4. The Future of Work: How AI is Reshaping Employment Landscapes** \nThe discussions around AI's impact on the workforce are both urgent and complex, as advances in automation and machine learning continue to transform the job market. This article could delve into the current trends of AI-driven job displacement alongside opportunities for upskilling and the creation of new job roles. By examining case studies of companies that integrate AI effectively and the resulting workforce adaptations, readers can gain valuable insights into preparing for a future where humans and AI collaborate. This exploration highlights the importance of policies that promote workforce resilience in the face of change.\n\n**5. Decentralized AI: Exploring the Role of Blockchain in AI Development** \nAs blockchain technology sweeps through various sectors, its application in AI development presents a fascinating topic worth examining. Decentralized AI could address issues of data privacy, security, and democratization in AI models by allowing users to retain ownership of data while benefiting from AI's capabilities. This article could analyze how decentralized networks are disrupting traditional AI development models, featuring innovative projects that harness the synergy between blockchain and AI. 
Highlighting potential pitfalls and the future landscape of decentralized AI could stimulate discussion among technologists, entrepreneurs, and policymakers alike.\n\nThese topics not only reflect current trends but also probe deeper into ethical and practical considerations, making them timely and relevant for contemporary audiences."
)

@@ -570,8 +569,6 @@ def test_crew_with_delegating_agents(ceo, writer):

@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer):
    from typing import Type

    from pydantic import BaseModel, Field

    from crewai.tools import BaseTool
@@ -584,7 +581,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool that just returns the input"
        args_schema: Type[BaseModel] = TestToolInput
        args_schema: type[BaseModel] = TestToolInput

        def _run(self, query: str) -> str:
            return f"Processed: {query}"
@@ -622,18 +619,16 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
    _, kwargs = mock_execute_sync.call_args
    tools = kwargs["tools"]

    assert any(
        isinstance(tool, TestTool) for tool in tools
    ), "TestTool should be present"
    assert any(
        "delegate" in tool.name.lower() for tool in tools
    ), "Delegation tool should be present"
    assert any(isinstance(tool, TestTool) for tool in tools), (
        "TestTool should be present"
    )
    assert any("delegate" in tool.name.lower() for tool in tools), (
        "Delegation tool should be present"
    )

@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer):
    from typing import Type

    from pydantic import BaseModel, Field

    from crewai.tools import BaseTool
@@ -646,7 +641,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool that just returns the input"
        args_schema: Type[BaseModel] = TestToolInput
        args_schema: type[BaseModel] = TestToolInput

        def _run(self, query: str) -> str:
            return f"Processed: {query}"
@@ -686,18 +681,16 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
    _, kwargs = mock_execute_sync.call_args
    tools = kwargs["tools"]

    assert any(
        isinstance(tool, TestTool) for tool in new_ceo.tools
    ), "TestTool should be present"
    assert any(
        "delegate" in tool.name.lower() for tool in tools
    ), "Delegation tool should be present"
    assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), (
        "TestTool should be present"
    )
    assert any("delegate" in tool.name.lower() for tool in tools), (
        "Delegation tool should be present"
    )

@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools(researcher):
    from typing import Type

    from pydantic import BaseModel, Field

    from crewai.tools import BaseTool
@@ -710,7 +703,7 @@ def test_task_tools_override_agent_tools(researcher):
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool that just returns the input"
        args_schema: Type[BaseModel] = TestToolInput
        args_schema: type[BaseModel] = TestToolInput

        def _run(self, query: str) -> str:
            return f"Processed: {query}"
@@ -718,7 +711,7 @@ def test_task_tools_override_agent_tools(researcher):
    class AnotherTestTool(BaseTool):
        name: str = "Another Test Tool"
        description: str = "Another test tool"
        args_schema: Type[BaseModel] = TestToolInput
        args_schema: type[BaseModel] = TestToolInput

        def _run(self, query: str) -> str:
            return f"Another processed: {query}"
@@ -754,7 +747,6 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
    """
    Test that task tools override agent tools while preserving delegation tools when allow_delegation=True
    """
    from typing import Type

    from pydantic import BaseModel, Field

@@ -766,7 +758,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool that just returns the input"
        args_schema: Type[BaseModel] = TestToolInput
        args_schema: type[BaseModel] = TestToolInput

        def _run(self, query: str) -> str:
            return f"Processed: {query}"
@@ -774,7 +766,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
    class AnotherTestTool(BaseTool):
        name: str = "Another Test Tool"
        description: str = "Another test tool"
        args_schema: Type[BaseModel] = TestToolInput
        args_schema: type[BaseModel] = TestToolInput

        def _run(self, query: str) -> str:
            return f"Another processed: {query}"
@@ -815,17 +807,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
    used_tools = kwargs["tools"]

    # Confirm AnotherTestTool is present but TestTool is not
    assert any(
        isinstance(tool, AnotherTestTool) for tool in used_tools
    ), "AnotherTestTool should be present"
    assert not any(
        isinstance(tool, TestTool) for tool in used_tools
    ), "TestTool should not be present among used tools"
    assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), (
        "AnotherTestTool should be present"
    )
    assert not any(isinstance(tool, TestTool) for tool in used_tools), (
        "TestTool should not be present among used tools"
    )

    # Confirm delegation tool(s) are present
    assert any(
        "delegate" in tool.name.lower() for tool in used_tools
    ), "Delegation tool should be present"
    assert any("delegate" in tool.name.lower() for tool in used_tools), (
        "Delegation tool should be present"
    )

    # Finally, make sure the agent's original tools remain unchanged
    assert len(researcher_with_delegation.tools) == 1
@@ -929,9 +921,9 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
        tool="multiplier", input={"first_number": 2, "second_number": 6}
    )
    assert cache_calls[0] == expected_call, f"First call mismatch: {cache_calls[0]}"
    assert (
        cache_calls[1] == expected_call
    ), f"Second call mismatch: {cache_calls[1]}"
    assert cache_calls[1] == expected_call, (
        f"Second call mismatch: {cache_calls[1]}"
    )


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1042,7 +1034,7 @@ def test_crew_kickoff_streaming_usage_metrics():
    assert result.token_usage.cached_prompt_tokens == 0


def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
def test_agents_rpm_is_never_set_if_crew_max_rpm_is_not_set():
    agent = Agent(
        role="test role",
        goal="test goal",
@@ -1395,8 +1387,9 @@ def test_kickoff_for_each_error_handling():
    crew = Crew(agents=[agent], tasks=[task])

    with patch.object(Crew, "kickoff") as mock_kickoff:
        mock_kickoff.side_effect = expected_outputs[:2] + [
            Exception("Simulated kickoff error")
        mock_kickoff.side_effect = [
            *expected_outputs[:2],
            Exception("Simulated kickoff error"),
        ]
        with pytest.raises(Exception, match="Simulated kickoff error"):
            crew.kickoff_for_each(inputs=inputs)
@@ -1674,9 +1667,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():

    # Verify that exactly one tool was used and it was a CodeInterpreterTool
    assert len(used_tools) == 1, "Should have exactly one tool"
    assert isinstance(
        used_tools[0], CodeInterpreterTool
    ), "Tool should be CodeInterpreterTool"
    assert isinstance(used_tools[0], CodeInterpreterTool), (
        "Tool should be CodeInterpreterTool"
    )

@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1760,10 +1753,10 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
    assert result.raw == "Howdy!"

    assert result.token_usage == UsageMetrics(
        total_tokens=2390,
        prompt_tokens=2264,
        completion_tokens=126,
        successful_requests=4,
        total_tokens=1673,
        prompt_tokens=1562,
        completion_tokens=111,
        successful_requests=3,
        cached_prompt_tokens=0,
    )

@@ -2179,8 +2172,7 @@ def test_tools_with_custom_caching():
        return first_number * second_number

    def cache_func(args, result):
        cache = result % 2 == 0
        return cache
        return result % 2 == 0

    multiplcation_tool.cache_function = cache_func

@@ -2884,7 +2876,7 @@ def test_manager_agent_with_tools_raises_exception(researcher, writer):
        tasks=[task],
    )

    with pytest.raises(Exception):
    with pytest.raises(Exception, match="Manager agent should not have tools"):
        crew.kickoff()


@@ -3108,7 +3100,7 @@ def test_crew_task_db_init():
        db_handler.load()
        assert True  # If we reach this point, no exception was raised
    except Exception as e:
        pytest.fail(f"An exception was raised: {str(e)}")
        pytest.fail(f"An exception was raised: {e!s}")

@pytest.mark.vcr(filter_headers=["authorization"])
@@ -3494,8 +3486,9 @@ def test_key(researcher, writer):
        process=Process.sequential,
        tasks=tasks,
    )
    hash = hashlib.md5(
        f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode()
    hash = md5(
        f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode(),
        usedforsecurity=False,
    ).hexdigest()

    assert crew.key == hash
@@ -3534,8 +3527,9 @@ def test_key_with_interpolated_inputs():
        process=Process.sequential,
        tasks=tasks,
    )
    hash = hashlib.md5(
        f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode()
    hash = md5(
        f"{researcher.key}|{writer.key}|{tasks[0].key}|{tasks[1].key}".encode(),
        usedforsecurity=False,
    ).hexdigest()

    assert crew.key == hash
@@ -3815,16 +3809,15 @@ def test_fetch_inputs():
    expected_placeholders = {"role_detail", "topic", "field"}
    actual_placeholders = crew.fetch_inputs()

    assert (
        actual_placeholders == expected_placeholders
    ), f"Expected {expected_placeholders}, but got {actual_placeholders}"
    assert actual_placeholders == expected_placeholders, (
        f"Expected {expected_placeholders}, but got {actual_placeholders}"
    )


def test_task_tools_preserve_code_execution_tools():
    """
    Test that task tools don't override code execution tools when allow_code_execution=True
    """
    from typing import Type

    # Mock embedchain initialization to prevent race conditions in parallel CI execution
    with patch("embedchain.client.Client.setup"):
@@ -3841,7 +3834,7 @@ def test_task_tools_preserve_code_execution_tools():
        class TestTool(BaseTool):
            name: str = "Test Tool"
            description: str = "A test tool that just returns the input"
            args_schema: Type[BaseModel] = TestToolInput
            args_schema: type[BaseModel] = TestToolInput

            def _run(self, query: str) -> str:
                return f"Processed: {query}"
@@ -3892,20 +3885,20 @@ def test_task_tools_preserve_code_execution_tools():
        used_tools = kwargs["tools"]

        # Verify all expected tools are present
        assert any(
            isinstance(tool, TestTool) for tool in used_tools
        ), "Task's TestTool should be present"
        assert any(
            isinstance(tool, CodeInterpreterTool) for tool in used_tools
        ), "CodeInterpreterTool should be present"
        assert any(
            "delegate" in tool.name.lower() for tool in used_tools
        ), "Delegation tool should be present"
        assert any(isinstance(tool, TestTool) for tool in used_tools), (
            "Task's TestTool should be present"
        )
        assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), (
            "CodeInterpreterTool should be present"
        )
        assert any("delegate" in tool.name.lower() for tool in used_tools), (
            "Delegation tool should be present"
        )

        # Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
        assert (
            len(used_tools) == 4
        ), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
        assert len(used_tools) == 4, (
            "Should have TestTool, CodeInterpreter, and 2 delegation tools"
        )

@pytest.mark.vcr(filter_headers=["authorization"])
@@ -3949,9 +3942,9 @@ def test_multimodal_flag_adds_multimodal_tools():
    used_tools = kwargs["tools"]

    # Check that the multimodal tool was added
    assert any(
        isinstance(tool, AddImageTool) for tool in used_tools
    ), "AddImageTool should be present when agent is multimodal"
    assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
        "AddImageTool should be present when agent is multimodal"
    )

    # Verify we have exactly one tool (just the AddImageTool)
    assert len(used_tools) == 1, "Should only have the AddImageTool"
@@ -4215,9 +4208,9 @@ def test_crew_guardrail_feedback_in_context():
    assert len(execution_contexts) > 1, "Task should have been executed multiple times"

    # Verify that the second execution included the guardrail feedback
    assert (
        "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
    ), "Guardrail feedback should be included in retry context"
    assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], (
        "Guardrail feedback should be included in retry context"
    )

    # Verify final output meets guardrail requirements
    assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
@@ -4232,13 +4225,11 @@ def test_before_kickoff_callback():

    @CrewBase
    class TestCrewClass:
        from typing import List

        from crewai.agents.agent_builder.base_agent import BaseAgent
        from crewai.project import CrewBase, agent, before_kickoff, crew, task

        agents: List[BaseAgent]
        tasks: List[Task]
        agents: list[BaseAgent]
        tasks: list[Task]

        agents_config = None
        tasks_config = None
@@ -4262,12 +4253,11 @@ def test_before_kickoff_callback():

        @task
        def my_task(self):
            task = Task(
            return Task(
                description="Test task description",
                expected_output="Test expected output",
                agent=self.my_agent(),
            )
            return task

        @crew
        def crew(self):
@@ -4433,46 +4423,46 @@ def test_crew_copy_with_memory():
    try:
        crew_copy = crew.copy()

        assert hasattr(
            crew_copy, "_short_term_memory"
        ), "Copied crew should have _short_term_memory"
        assert (
            crew_copy._short_term_memory is not None
        ), "Copied _short_term_memory should not be None"
        assert (
            id(crew_copy._short_term_memory) != original_short_term_id
        ), "Copied _short_term_memory should be a new object"
        assert hasattr(crew_copy, "_short_term_memory"), (
            "Copied crew should have _short_term_memory"
        )
        assert crew_copy._short_term_memory is not None, (
            "Copied _short_term_memory should not be None"
        )
        assert id(crew_copy._short_term_memory) != original_short_term_id, (
            "Copied _short_term_memory should be a new object"
        )

        assert hasattr(
            crew_copy, "_long_term_memory"
        ), "Copied crew should have _long_term_memory"
        assert (
            crew_copy._long_term_memory is not None
        ), "Copied _long_term_memory should not be None"
        assert (
            id(crew_copy._long_term_memory) != original_long_term_id
        ), "Copied _long_term_memory should be a new object"
        assert hasattr(crew_copy, "_long_term_memory"), (
            "Copied crew should have _long_term_memory"
        )
        assert crew_copy._long_term_memory is not None, (
            "Copied _long_term_memory should not be None"
        )
        assert id(crew_copy._long_term_memory) != original_long_term_id, (
            "Copied _long_term_memory should be a new object"
        )

        assert hasattr(
            crew_copy, "_entity_memory"
        ), "Copied crew should have _entity_memory"
        assert (
            crew_copy._entity_memory is not None
        ), "Copied _entity_memory should not be None"
        assert (
            id(crew_copy._entity_memory) != original_entity_id
        ), "Copied _entity_memory should be a new object"
        assert hasattr(crew_copy, "_entity_memory"), (
            "Copied crew should have _entity_memory"
        )
        assert crew_copy._entity_memory is not None, (
            "Copied _entity_memory should not be None"
        )
        assert id(crew_copy._entity_memory) != original_entity_id, (
            "Copied _entity_memory should be a new object"
        )

        if original_external_id:
            assert hasattr(
                crew_copy, "_external_memory"
            ), "Copied crew should have _external_memory"
            assert (
                crew_copy._external_memory is not None
            ), "Copied _external_memory should not be None"
            assert (
                id(crew_copy._external_memory) != original_external_id
            ), "Copied _external_memory should be a new object"
            assert hasattr(crew_copy, "_external_memory"), (
                "Copied crew should have _external_memory"
            )
            assert crew_copy._external_memory is not None, (
                "Copied _external_memory should not be None"
            )
            assert id(crew_copy._external_memory) != original_external_id, (
                "Copied _external_memory should be a new object"
            )
        else:
            assert (
                not hasattr(crew_copy, "_external_memory")
@@ -4735,21 +4725,25 @@ def test_ensure_exchanged_messages_are_propagated_to_external_memory():
    ) as external_memory_save:
        crew.kickoff()

        expected_messages = [
            {
                "role": "system",
"content": "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
            },
            {
                "role": "user",
"content": "\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
            },
            {
                "role": "assistant",
"content": "I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLet’s make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. It’s like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!",
            },
        ]
        external_memory_save.assert_called_once_with(
            value=ANY,
            metadata={"description": ANY, "messages": expected_messages},
        )
        external_memory_save.assert_called_once()

        call_args = external_memory_save.call_args

        assert "value" in call_args.kwargs or len(call_args.args) > 0
        assert "metadata" in call_args.kwargs or len(call_args.args) > 1

        if "metadata" in call_args.kwargs:
            metadata = call_args.kwargs["metadata"]
        else:
            metadata = call_args.args[1]

        assert "description" in metadata
        assert "messages" in metadata
        assert isinstance(metadata["messages"], list)
        assert len(metadata["messages"]) >= 2

        messages = metadata["messages"]
        assert messages[0]["role"] == "system"
        assert "Researcher" in messages[0]["content"]
        assert messages[1]["role"] == "user"
        assert "Research a topic to teach a kid aged 6 about math" in messages[1]["content"]

@@ -1,11 +1,11 @@
"""Test Agent creation and execution basic functionality."""

import hashlib
import ast
import json
import os
import time
from functools import partial
from typing import Tuple, Union
from hashlib import md5
from unittest.mock import MagicMock, patch

import pytest
@@ -248,7 +248,7 @@ def test_guardrail_type_error():
        return (True, x)

    @staticmethod
    def guardrail_static_fn(x: TaskOutput) -> tuple[bool, Union[str, TaskOutput]]:
    def guardrail_static_fn(x: TaskOutput) -> tuple[bool, str | TaskOutput]:
        return (True, x)

    obj = Object()
@@ -271,7 +271,7 @@ def test_guardrail_type_error():
        guardrail=Object.guardrail_static_fn,
    )

    def error_fn(x: TaskOutput, y: bool) -> Tuple[bool, TaskOutput]:
    def error_fn(x: TaskOutput, y: bool) -> tuple[bool, TaskOutput]:
        return (y, x)

    Task(
@@ -340,7 +340,7 @@ def test_output_pydantic_hierarchical():
    )
    result = crew.kickoff()
    assert isinstance(result.pydantic, ScoreOutput)
    assert result.to_dict() == {"score": 5}
    assert result.to_dict() == {"score": 4}

@pytest.mark.vcr(filter_headers=["authorization"])
@@ -401,8 +401,8 @@ def test_output_json_hierarchical():
        manager_llm="gpt-4o",
    )
    result = crew.kickoff()
    assert result.json == '{"score": 5}'
    assert result.to_dict() == {"score": 5}
    assert result.json == '{"score": 4}'
    assert result.to_dict() == {"score": 4}


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -560,8 +560,8 @@ def test_output_json_dict_hierarchical():
        manager_llm="gpt-4o",
    )
    result = crew.kickoff()
    assert {"score": 5} == result.json_dict
    assert result.to_dict() == {"score": 5}
    assert {"score": 4} == result.json_dict
    assert result.to_dict() == {"score": 4}


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -900,11 +900,11 @@ def test_conditional_task_copy_preserves_type():
    assert isinstance(copied_conditional_task, ConditionalTask)


def test_interpolate_inputs():
def test_interpolate_inputs(tmp_path):
    task = Task(
        description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 interesting ideas about {topic}.",
        output_file="/tmp/{topic}/output_{date}.txt",
        output_file=str(tmp_path / "{topic}" / "output_{date}.txt"),
    )

    task.interpolate_inputs_and_add_conversation_history(
@@ -915,7 +915,7 @@ def test_interpolate_inputs():
        == "Give me a list of 5 interesting ideas about AI to explore for an article, what makes them unique and interesting."
    )
    assert task.expected_output == "Bullet point list of 5 interesting ideas about AI."
    assert task.output_file == "/tmp/AI/output_2025.txt"
    assert task.output_file == str(tmp_path / "AI" / "output_2025.txt")

    task.interpolate_inputs_and_add_conversation_history(
        inputs={"topic": "ML", "date": "2025"}
@@ -925,7 +925,7 @@ def test_interpolate_inputs():
        == "Give me a list of 5 interesting ideas about ML to explore for an article, what makes them unique and interesting."
    )
    assert task.expected_output == "Bullet point list of 5 interesting ideas about ML."
    assert task.output_file == "/tmp/ML/output_2025.txt"
    assert task.output_file == str(tmp_path / "ML" / "output_2025.txt")


def test_interpolate_only():
@@ -1074,8 +1074,9 @@ def test_key():
        description=original_description,
        expected_output=original_expected_output,
    )
    hash = hashlib.md5(
        f"{original_description}|{original_expected_output}".encode()
    hash = md5(
        f"{original_description}|{original_expected_output}".encode(),
        usedforsecurity=False,
    ).hexdigest()

    assert task.key == hash, "The key should be the hash of the description."
@@ -1086,7 +1087,7 @@ def test_key():
    )


def test_output_file_validation():
def test_output_file_validation(tmp_path):
    """Test output file path validation."""
    # Valid paths
    assert (
@@ -1097,13 +1098,15 @@ def test_output_file_validation():
        ).output_file
        == "output.txt"
    )
    # Use secure temporary path instead of /tmp
    temp_file = tmp_path / "output.txt"
    assert (
        Task(
            description="Test task",
            expected_output="Test output",
            output_file="/tmp/output.txt",
            output_file=str(temp_file),
        ).output_file
        == "tmp/output.txt"
        == str(temp_file).lstrip("/")  # Remove leading slash to match expected behavior
    )
    assert (
        Task(
@@ -1320,7 +1323,7 @@ def test_interpolate_with_list_of_dicts():
    }
    result = interpolate_only("{people}", input_data)

    parsed_result = eval(result)
    parsed_result = ast.literal_eval(result)
    assert isinstance(parsed_result, list)
    assert len(parsed_result) == 2
    assert parsed_result[0]["name"] == "Alice"
@@ -1346,7 +1349,7 @@ def test_interpolate_with_nested_structures():
        }
    }
    result = interpolate_only("{company}", input_data)
    parsed = eval(result)
    parsed = ast.literal_eval(result)

    assert parsed["name"] == "TechCorp"
    assert len(parsed["departments"]) == 2
@@ -1364,7 +1367,7 @@ def test_interpolate_with_special_characters():
        }
    }
    result = interpolate_only("{special_data}", input_data)
    parsed = eval(result)
    parsed = ast.literal_eval(result)

    assert parsed["quotes"] == """This has "double" and 'single' quotes"""
    assert parsed["unicode"] == "文字化けテスト"
@@ -1386,7 +1389,7 @@ def test_interpolate_mixed_types():
        }
    }
    result = interpolate_only("{data}", input_data)
    parsed = eval(result)
    parsed = ast.literal_eval(result)

    assert parsed["name"] == "Test Dataset"
    assert parsed["samples"] == 1000
@@ -1409,7 +1412,7 @@ def test_interpolate_complex_combination():
        ]
    }
    result = interpolate_only("{report}", input_data)
    parsed = eval(result)
    parsed = ast.literal_eval(result)

    assert len(parsed) == 2
    assert parsed[0]["month"] == "January"
@@ -1482,7 +1485,7 @@ def test_interpolate_valid_complex_types():

    # Should not raise any errors
    result = interpolate_only("{data}", {"data": valid_data})
    parsed = eval(result)
    parsed = ast.literal_eval(result)
    assert parsed["name"] == "Valid Dataset"
    assert parsed["stats"]["nested"]["deeper"]["b"] == 2.5

@@ -1512,7 +1515,7 @@ def test_interpolate_valid_types():
    }

    result = interpolate_only("{data}", {"data": valid_data})
    parsed = eval(result)
    parsed = ast.literal_eval(result)

    assert parsed["active"] is True
    assert parsed["deleted"] is False

@@ -1,5 +1,3 @@
from typing import Optional

import pytest
from pydantic import BaseModel, Field

@@ -39,6 +37,7 @@ def test_initialization(basic_function, schema_class):
    assert tool.func == basic_function
    assert tool.args_schema == schema_class


def test_from_function(basic_function):
    """Test creating tool from function"""
    tool = CrewStructuredTool.from_function(
@@ -50,6 +49,7 @@ def test_from_function(basic_function):
    assert tool.func == basic_function
    assert isinstance(tool.args_schema, type(BaseModel))


def test_validate_function_signature(basic_function, schema_class):
    """Test function signature validation"""
    tool = CrewStructuredTool(
@@ -62,6 +62,7 @@ def test_validate_function_signature(basic_function, schema_class):
    # Should not raise any exceptions
    tool._validate_function_signature()


@pytest.mark.asyncio
async def test_ainvoke(basic_function):
    """Test asynchronous invocation"""
@@ -70,6 +71,7 @@ async def test_ainvoke(basic_function):
    result = await tool.ainvoke(input={"param1": "test"})
    assert result == "test 0"


def test_parse_args_dict(basic_function):
    """Test parsing dictionary arguments"""
    tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
@@ -78,6 +80,7 @@ def test_parse_args_dict(basic_function):
    assert parsed["param1"] == "test"
    assert parsed["param2"] == 42


def test_parse_args_string(basic_function):
    """Test parsing string arguments"""
    tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
@@ -86,6 +89,7 @@ def test_parse_args_string(basic_function):
    assert parsed["param1"] == "test"
    assert parsed["param2"] == 42


def test_complex_types():
    """Test handling of complex parameter types"""

@@ -99,6 +103,7 @@ def test_complex_types():
    result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
    assert result == "Processed 3 items with 1 nested keys"


def test_schema_inheritance():
    """Test tool creation with inherited schema"""

@@ -119,13 +124,14 @@ def test_schema_inheritance():
    result = tool.invoke({"base_param": "test", "extra_param": 42})
    assert result == "test 42"


def test_default_values_in_schema():
    """Test handling of default values in schema"""

    def default_func(
        required_param: str,
        optional_param: str = "default",
        nullable_param: Optional[int] = None,
        nullable_param: int | None = None,
    ) -> str:
        """Test function with default values."""
        return f"{required_param} {optional_param} {nullable_param}"
@@ -144,6 +150,7 @@ def test_default_values_in_schema():
    )
    assert result == "test custom 42"


@pytest.fixture
def custom_tool_decorator():
    from crewai.tools import tool
@@ -155,6 +162,7 @@ def custom_tool_decorator():

    return custom_tool


@pytest.fixture
def custom_tool():
    from crewai.tools import BaseTool
@@ -169,17 +177,25 @@ def custom_tool():

    return CustomTool()

def build_simple_crew(tool):
    from crewai import Agent, Task, Crew
    from crewai import Agent, Crew, Task

    agent1 = Agent(role="Simple role", goal="Simple goal", backstory="Simple backstory", tools=[tool])

    say_hi_task = Task(
        description="Use the custom tool result as answer.", agent=agent1, expected_output="Use the tool result"
    agent1 = Agent(
        role="Simple role",
        goal="Simple goal",
        backstory="Simple backstory",
        tools=[tool],
    )

    crew = Crew(agents=[agent1], tasks=[say_hi_task])
    return crew
    say_hi_task = Task(
        description="Use the custom tool result as answer.",
        agent=agent1,
        expected_output="Use the tool result",
    )

    return Crew(agents=[agent1], tasks=[say_hi_task])


@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_tool_using_within_isolated_crew(custom_tool):
@@ -188,6 +204,7 @@ def test_async_tool_using_within_isolated_crew(custom_tool):

    assert result.raw == "Hello World from Custom Tool"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):
    crew = build_simple_crew(custom_tool_decorator)
@@ -195,6 +212,7 @@ def test_async_tool_using_decorator_within_isolated_crew(custom_tool_decorator):

    assert result.raw == "Hello World from Custom Tool"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_tool_within_flow(custom_tool):
    from crewai.flow.flow import Flow
@@ -205,8 +223,7 @@ def test_async_tool_within_flow(custom_tool):
        @start()
        async def start(self):
            crew = build_simple_crew(custom_tool)
            result = await crew.kickoff_async()
            return result
            return await crew.kickoff_async()

    flow = StructuredExampleFlow()
    result = flow.kickoff()
@@ -219,12 +236,141 @@ def test_async_tool_using_decorator_within_flow(custom_tool_decorator):

    class StructuredExampleFlow(Flow):
        from crewai.flow.flow import start

        @start()
        async def start(self):
            crew = build_simple_crew(custom_tool_decorator)
            result = await crew.kickoff_async()
            return result
            return await crew.kickoff_async()

    flow = StructuredExampleFlow()
    result = flow.kickoff()
    assert result.raw == "Hello World from Custom Tool"
    assert result.raw == "Hello World from Custom Tool"

def test_structured_tool_invoke_calls_func_only_once():
    """Test that CrewStructuredTool.invoke() calls the underlying function exactly once."""
    call_count = 0
    call_history = []

    def counting_function(param: str) -> str:
        """Function that tracks how many times it's called."""
        nonlocal call_count
        call_count += 1
        call_history.append(f"Call #{call_count} with param: {param}")
        return f"Result from call #{call_count}: {param}"

    # Create CrewStructuredTool directly
    tool = CrewStructuredTool.from_function(
        func=counting_function,
        name="direct_test_tool",
        description="Tool to test direct invoke() method",
    )

    # Call invoke() directly - this is where the bug was
    result = tool.invoke({"param": "test_value"})

    # Critical assertions that would catch the duplicate execution bug
    assert call_count == 1, (
        f"DUPLICATE EXECUTION BUG: Function was called {call_count} times instead of 1. "
        f"This means CrewStructuredTool.invoke() has duplicate function calls. "
        f"Call history: {call_history}"
    )

    assert len(call_history) == 1, (
        f"Expected 1 call in history, got {len(call_history)}: {call_history}"
    )

    assert call_history[0] == "Call #1 with param: test_value", (
        f"Expected 'Call #1 with param: test_value', got: {call_history[0]}"
    )

    assert result == "Result from call #1: test_value", (
        f"Expected result from first call, got: {result}"
    )


def test_structured_tool_invoke_multiple_calls_increment_correctly():
    """Test multiple calls to invoke() to ensure each increments correctly."""
    call_count = 0

    def incrementing_function(value: int) -> int:
        nonlocal call_count
        call_count += 1
        return value + call_count

    tool = CrewStructuredTool.from_function(
        func=incrementing_function,
        name="incrementing_tool",
        description="Tool that increments on each call",
    )

    result1 = tool.invoke({"value": 10})
    assert call_count == 1, (
        f"After first invoke, expected call_count=1, got {call_count}"
    )
    assert result1 == 11, f"Expected 11 (10+1), got {result1}"

    result2 = tool.invoke({"value": 20})
    assert call_count == 2, (
        f"After second invoke, expected call_count=2, got {call_count}"
    )
    assert result2 == 22, f"Expected 22 (20+2), got {result2}"

    result3 = tool.invoke({"value": 30})
    assert call_count == 3, (
        f"After third invoke, expected call_count=3, got {call_count}"
    )
    assert result3 == 33, f"Expected 33 (30+3), got {result3}"


def test_structured_tool_invoke_with_side_effects():
    """Test that side effects only happen once per invoke() call."""
    side_effects = []

    def side_effect_function(action: str) -> str:
        side_effects.append(f"SIDE_EFFECT: {action} executed at call")
        return f"Action {action} completed"

    tool = CrewStructuredTool.from_function(
        func=side_effect_function,
        name="side_effect_tool",
        description="Tool with observable side effects",
    )

    result = tool.invoke({"action": "write_file"})

    assert len(side_effects) == 1, (
        f"SIDE EFFECT BUG: Expected 1 side effect, got {len(side_effects)}. "
        f"This indicates the function was called multiple times. "
        f"Side effects: {side_effects}"
    )

    assert side_effects[0] == "SIDE_EFFECT: write_file executed at call"
    assert result == "Action write_file completed"


def test_structured_tool_invoke_exception_handling():
    """Test that exceptions don't cause duplicate execution."""
    call_count = 0

    def failing_function(should_fail: bool) -> str:
        nonlocal call_count
        call_count += 1
        if should_fail:
            raise ValueError(f"Intentional failure on call #{call_count}")
        return f"Success on call #{call_count}"

    tool = CrewStructuredTool.from_function(
        func=failing_function, name="failing_tool", description="Tool that can fail"
    )

    result = tool.invoke({"should_fail": False})
    assert call_count == 1, f"Expected 1 call for success case, got {call_count}"
    assert result == "Success on call #1"

    call_count = 0

    with pytest.raises(ValueError, match="Intentional failure on call #1"):
        tool.invoke({"should_fail": True})

    assert call_count == 1