mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 00:28:31 +00:00)

Merge branch 'main' into lorenze/trace-improvements-3

.github/workflows/tests.yml (vendored), 27 changed lines
@@ -22,6 +22,8 @@ jobs:
     steps:
       - name: Checkout code
        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # Fetch all history for proper diff

       - name: Restore global uv cache
        id: cache-restore
@@ -49,22 +51,29 @@ jobs:
        uses: actions/cache/restore@v4
        with:
          path: .test_durations_py*
-          key: test-durations-py${{ matrix.python-version }}-
-          restore-keys: |
-            test-durations-py${{ matrix.python-version }}-
+          key: test-durations-py${{ matrix.python-version }}

      - name: Run tests (group ${{ matrix.group }} of 8)
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          DURATION_FILE=".test_durations_py${PYTHON_VERSION_SAFE}"

-          if [ -f "$DURATION_FILE" ]; then
-            echo "Using cached test durations for optimal splitting"
-            DURATIONS_ARG="--durations-path=${DURATION_FILE}"
-          else
-            echo "No cached durations found, tests will be split evenly"
+          # Temporarily always skip cached durations to fix test splitting
+          # When durations don't match, pytest-split runs duplicate tests instead of splitting
+          echo "Using even test splitting (duration cache disabled until fix merged)"
           DURATIONS_ARG=""
-          fi
+
+          # Original logic (disabled temporarily):
+          # if [ ! -f "$DURATION_FILE" ]; then
+          #   echo "No cached durations found, tests will be split evenly"
+          #   DURATIONS_ARG=""
+          # elif git diff origin/${{ github.base_ref }}...HEAD --name-only 2>/dev/null | grep -q "^tests/.*\.py$"; then
+          #   echo "Test files have changed, skipping cached durations to avoid mismatches"
+          #   DURATIONS_ARG=""
+          # else
+          #   echo "No test changes detected, using cached test durations for optimal splitting"
+          #   DURATIONS_ARG="--durations-path=${DURATION_FILE}"
+          # fi

           uv run pytest \
             --block-network \
.github/workflows/update-test-durations.yml (vendored), 2 changed lines
@@ -58,7 +58,7 @@ jobs:
        uses: actions/cache/save@v4
        with:
          path: .test_durations_py*
-          key: test-durations-py${{ matrix.python-version }}-${{ github.sha }}
+          key: test-durations-py${{ matrix.python-version }}

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
src/crewai/agents/agent_adapters/openai_agents/openai_agent_adapter.py
@@ -1,75 +1,96 @@
-from typing import Any, List, Optional
+"""OpenAI agents adapter for CrewAI integration.
+
+This module contains the OpenAIAgentAdapter class that integrates OpenAI Assistants
+with CrewAI's agent system, providing tool integration and structured output support.
+"""
+
+from typing import Any, cast

-from pydantic import Field, PrivateAttr
+from pydantic import ConfigDict, Field, PrivateAttr
+from typing_extensions import Unpack

 from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
+from crewai.agents.agent_adapters.openai_agents.openai_agent_tool_adapter import (
+    OpenAIAgentToolAdapter,
+)
+from crewai.agents.agent_adapters.openai_agents.protocols import (
+    AgentKwargs,
+    OpenAIAgentsModule,
+)
+from crewai.agents.agent_adapters.openai_agents.protocols import (
+    OpenAIAgent as OpenAIAgentProtocol,
+)
 from crewai.agents.agent_adapters.openai_agents.structured_output_converter import (
     OpenAIConverterAdapter,
 )
 from crewai.agents.agent_builder.base_agent import BaseAgent
-from crewai.tools import BaseTool
-from crewai.tools.agent_tools.agent_tools import AgentTools
-from crewai.utilities import Logger
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
 )
+from crewai.tools import BaseTool
+from crewai.tools.agent_tools.agent_tools import AgentTools
+from crewai.utilities import Logger
+from crewai.utilities.import_utils import require

-try:
-    from agents import Agent as OpenAIAgent  # type: ignore
-    from agents import Runner, enable_verbose_stdout_logging  # type: ignore
-
-    from .openai_agent_tool_adapter import OpenAIAgentToolAdapter
-
-    OPENAI_AVAILABLE = True
-except ImportError:
-    OPENAI_AVAILABLE = False
+openai_agents_module = cast(
+    OpenAIAgentsModule,
+    require(
+        "agents",
+        purpose="OpenAI agents functionality",
+    ),
+)
+OpenAIAgent = openai_agents_module.Agent
+Runner = openai_agents_module.Runner
+enable_verbose_stdout_logging = openai_agents_module.enable_verbose_stdout_logging


 class OpenAIAgentAdapter(BaseAgentAdapter):
-    """Adapter for OpenAI Assistants"""
+    """Adapter for OpenAI Assistants.
+
+    Integrates OpenAI Assistants API with CrewAI's agent system, providing
+    tool configuration, structured output handling, and task execution.
+    """

-    model_config = {"arbitrary_types_allowed": True}
+    model_config = ConfigDict(arbitrary_types_allowed=True)

-    _openai_agent: "OpenAIAgent" = PrivateAttr()
-    _logger: Logger = PrivateAttr(default_factory=lambda: Logger())
-    _active_thread: Optional[str] = PrivateAttr(default=None)
+    _openai_agent: OpenAIAgentProtocol = PrivateAttr()
+    _logger: Logger = PrivateAttr(default_factory=Logger)
+    _active_thread: str | None = PrivateAttr(default=None)
     function_calling_llm: Any = Field(default=None)
     step_callback: Any = Field(default=None)
-    _tool_adapter: "OpenAIAgentToolAdapter" = PrivateAttr()
+    _tool_adapter: OpenAIAgentToolAdapter = PrivateAttr()
     _converter_adapter: OpenAIConverterAdapter = PrivateAttr()

     def __init__(
         self,
-        model: str = "gpt-4o-mini",
-        tools: Optional[List[BaseTool]] = None,
-        agent_config: Optional[dict] = None,
-        **kwargs,
-    ):
-        if not OPENAI_AVAILABLE:
-            raise ImportError(
-                "OpenAI Agent Dependencies are not installed. Please install it using `uv add openai-agents`"
-            )
-        else:
-            role = kwargs.pop("role", None)
-            goal = kwargs.pop("goal", None)
-            backstory = kwargs.pop("backstory", None)
-            super().__init__(
-                role=role,
-                goal=goal,
-                backstory=backstory,
-                tools=tools,
-                agent_config=agent_config,
-                **kwargs,
-            )
-            self._tool_adapter = OpenAIAgentToolAdapter(tools=tools)
-            self.llm = model
-            self._converter_adapter = OpenAIConverterAdapter(self)
+        **kwargs: Unpack[AgentKwargs],
+    ) -> None:
+        """Initialize the OpenAI agent adapter.
+
+        Args:
+            **kwargs: All initialization arguments including role, goal, backstory,
+                model, tools, and agent_config.
+
+        Raises:
+            ImportError: If OpenAI agent dependencies are not installed.
+        """
+        super().__init__(**kwargs)
+        self._tool_adapter = OpenAIAgentToolAdapter(tools=kwargs.get("tools"))
+        self.llm = kwargs.get("model", "gpt-4o-mini")
+        self._converter_adapter = OpenAIConverterAdapter(agent_adapter=self)

     def _build_system_prompt(self) -> str:
-        """Build a system prompt for the OpenAI agent."""
+        """Build a system prompt for the OpenAI agent.
+
+        Creates a prompt containing the agent's role, goal, and backstory,
+        then enhances it with structured output instructions if needed.
+
+        Returns:
+            The complete system prompt string.
+        """
         base_prompt = f"""
 You are {self.role}.

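A side note on the import change above: the module-level try/except ImportError flag is replaced by CrewAI's require() helper plus a cast to a typing Protocol. A minimal sketch of the same pattern follows; the AgentsLike protocol and the simplified require() body are assumptions for illustration, not the library's actual definitions.

import importlib
from typing import Any, Protocol, cast


class AgentsLike(Protocol):
    """Hypothetical structural stand-in for the optional `agents` package."""

    Agent: Any
    Runner: Any


def require(name: str, purpose: str) -> Any:
    """Simplified stand-in for crewai.utilities.import_utils.require: import or fail loudly."""
    try:
        return importlib.import_module(name)
    except ImportError as exc:  # surface a purpose-specific message instead of a bare failure
        raise ImportError(f"{name} is required for {purpose}") from exc


# With the package installed this returns a typed handle; without it, it raises at import
# time, replacing the old OPENAI_AVAILABLE flag and the deferred error inside __init__.
agents_module = cast(AgentsLike, require("agents", purpose="OpenAI agents functionality"))
OpenAIAgent = agents_module.Agent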
@@ -84,10 +105,25 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
     def execute_task(
         self,
         task: Any,
-        context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        context: str | None = None,
+        tools: list[BaseTool] | None = None,
     ) -> str:
-        """Execute a task using the OpenAI Assistant"""
+        """Execute a task using the OpenAI Assistant.
+
+        Configures the assistant, processes the task, and handles event emission
+        for execution tracking.
+
+        Args:
+            task: The task object to execute.
+            context: Optional context information for the task.
+            tools: Optional additional tools for this execution.
+
+        Returns:
+            The final answer from the task execution.
+
+        Raises:
+            Exception: If task execution fails.
+        """
         self._converter_adapter.configure_structured_output(task)
         self.create_agent_executor(tools)

@@ -95,7 +131,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             enable_verbose_stdout_logging()

         try:
-            task_prompt = task.prompt()
+            task_prompt: str = task.prompt()
             if context:
                 task_prompt = self.i18n.slice("task_with_context").format(
                     task=task_prompt, context=context
@@ -109,8 +145,8 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
                     task=task,
                 ),
             )
-            result = self.agent_executor.run_sync(self._openai_agent, task_prompt)
-            final_answer = self.handle_execution_result(result)
+            result: Any = self.agent_executor.run_sync(self._openai_agent, task_prompt)
+            final_answer: str = self.handle_execution_result(result)
             crewai_event_bus.emit(
                 self,
                 event=AgentExecutionCompletedEvent(
@@ -120,7 +156,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             return final_answer

         except Exception as e:
-            self._logger.log("error", f"Error executing OpenAI task: {str(e)}")
+            self._logger.log("error", f"Error executing OpenAI task: {e!s}")
             crewai_event_bus.emit(
                 self,
                 event=AgentExecutionErrorEvent(
@@ -131,15 +167,22 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             )
             raise

-    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """
-        Configure the OpenAI agent for execution.
-        While OpenAI handles execution differently through Runner,
-        we can use this method to set up tools and configurations.
-        """
-        all_tools = list(self.tools or []) + list(tools or [])
+    def create_agent_executor(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure the OpenAI agent for execution.
+
+        While OpenAI handles execution differently through Runner,
+        this method sets up tools and agent configuration.
+
+        Args:
+            tools: Optional tools to configure for the agent.
+
+        Notes:
+            TODO: Properly type agent_executor in BaseAgent to avoid type issues
+            when assigning Runner class to this attribute.
+        """
+        all_tools: list[BaseTool] = list(self.tools or []) + list(tools or [])

-        instructions = self._build_system_prompt()
+        instructions: str = self._build_system_prompt()
         self._openai_agent = OpenAIAgent(
             name=self.role,
             instructions=instructions,
@@ -152,27 +195,48 @@ class OpenAIAgentAdapter(BaseAgentAdapter):

         self.agent_executor = Runner

-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
-        """Configure tools for the OpenAI Assistant"""
+    def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
+        """Configure tools for the OpenAI Assistant.
+
+        Args:
+            tools: Optional tools to configure for the assistant.
+        """
         if tools:
             self._tool_adapter.configure_tools(tools)
             if self._tool_adapter.converted_tools:
                 self._openai_agent.tools = self._tool_adapter.converted_tools

     def handle_execution_result(self, result: Any) -> str:
-        """Process OpenAI Assistant execution result converting any structured output to a string"""
+        """Process OpenAI Assistant execution result.
+
+        Converts any structured output to a string through the converter adapter.
+
+        Args:
+            result: The execution result from the OpenAI assistant.
+
+        Returns:
+            Processed result as a string.
+        """
         return self._converter_adapter.post_process_result(result.final_output)

-    def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
-        """Implement delegation tools support"""
-        agent_tools = AgentTools(agents=agents)
-        tools = agent_tools.tools()
-        return tools
+    def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
+        """Implement delegation tools support.
+
+        Creates delegation tools that allow this agent to delegate tasks to other agents.
+
+        Args:
+            agents: List of agents available for delegation.
+
+        Returns:
+            List of delegation tools.
+        """
+        agent_tools: AgentTools = AgentTools(agents=agents)
+        return agent_tools.tools()

-    def configure_structured_output(self, task) -> None:
+    def configure_structured_output(self, task: Any) -> None:
         """Configure the structured output for the specific agent implementation.

         Args:
-            structured_output: The structured output to be configured
+            task: The task object containing output format specifications.
         """
         self._converter_adapter.configure_structured_output(task)
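For orientation, a usage sketch of the new kwargs-only constructor follows; it assumes the openai-agents package is installed and that the import path below matches the file shown in this diff.

from crewai.agents.agent_adapters.openai_agents.openai_agent_adapter import (
    OpenAIAgentAdapter,
)

# Everything now flows through **kwargs typed as AgentKwargs (role, goal,
# backstory, model, tools, agent_config); there are no positional parameters.
adapter = OpenAIAgentAdapter(
    role="Research Analyst",
    goal="Summarize recent findings",
    backstory="An analyst who writes concise technical summaries.",
    model="gpt-4o-mini",
)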
src/crewai/agents/agent_adapters/openai_agents/openai_agent_tool_adapter.py
@@ -1,57 +1,125 @@
-import inspect
-from typing import Any, List, Optional
+"""OpenAI agent tool adapter for CrewAI tool integration.

-from agents import FunctionTool, Tool
+This module contains the OpenAIAgentToolAdapter class that converts CrewAI tools
+to OpenAI Assistant-compatible format using the agents library.
+"""
+
+import inspect
+import json
+import re
+from collections.abc import Awaitable
+from typing import Any, cast

 from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
+from crewai.agents.agent_adapters.openai_agents.protocols import (
+    OpenAIFunctionTool,
+    OpenAITool,
+)
 from crewai.tools import BaseTool
+from crewai.utilities.import_utils import require
+
+agents_module = cast(
+    Any,
+    require(
+        "agents",
+        purpose="OpenAI agents functionality",
+    ),
+)
+FunctionTool = agents_module.FunctionTool
+Tool = agents_module.Tool


 class OpenAIAgentToolAdapter(BaseToolAdapter):
-    """Adapter for OpenAI Assistant tools"""
+    """Adapter for OpenAI Assistant tools.

-    def __init__(self, tools: Optional[List[BaseTool]] = None):
-        self.original_tools = tools or []
+    Converts CrewAI BaseTool instances to OpenAI Assistant FunctionTool format
+    that can be used by OpenAI agents.
+    """

-    def configure_tools(self, tools: List[BaseTool]) -> None:
-        """Configure tools for the OpenAI Assistant"""
+    def __init__(self, tools: list[BaseTool] | None = None) -> None:
+        """Initialize the tool adapter.
+
+        Args:
+            tools: Optional list of CrewAI tools to adapt.
+        """
+        super().__init__()
+        self.original_tools: list[BaseTool] = tools or []
+        self.converted_tools: list[OpenAITool] = []
+
+    def configure_tools(self, tools: list[BaseTool]) -> None:
+        """Configure tools for the OpenAI Assistant.
+
+        Merges provided tools with original tools and converts them to
+        OpenAI Assistant format.
+
+        Args:
+            tools: List of CrewAI tools to configure.
+        """
         if self.original_tools:
-            all_tools = tools + self.original_tools
+            all_tools: list[BaseTool] = tools + self.original_tools
         else:
             all_tools = tools
         if all_tools:
             self.converted_tools = self._convert_tools_to_openai_format(all_tools)

+    @staticmethod
     def _convert_tools_to_openai_format(
-        self, tools: Optional[List[BaseTool]]
-    ) -> List[Tool]:
-        """Convert CrewAI tools to OpenAI Assistant tool format"""
+        tools: list[BaseTool] | None,
+    ) -> list[OpenAITool]:
+        """Convert CrewAI tools to OpenAI Assistant tool format.
+
+        Args:
+            tools: List of CrewAI tools to convert.
+
+        Returns:
+            List of OpenAI Assistant FunctionTool instances.
+        """
         if not tools:
             return []

         def sanitize_tool_name(name: str) -> str:
-            """Convert tool name to match OpenAI's required pattern"""
-            import re
-
-            sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
-            return sanitized
-
-        def create_tool_wrapper(tool: BaseTool):
-            """Create a wrapper function that handles the OpenAI function tool interface"""
+            """Convert tool name to match OpenAI's required pattern.
+
+            Args:
+                name: Original tool name.
+
+            Returns:
+                Sanitized tool name matching OpenAI requirements.
+            """
+            return re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
+
+        def create_tool_wrapper(tool: BaseTool) -> Any:
+            """Create a wrapper function that handles the OpenAI function tool interface.
+
+            Args:
+                tool: The CrewAI tool to wrap.
+
+            Returns:
+                Async wrapper function for OpenAI agent integration.
+            """

             async def wrapper(context_wrapper: Any, arguments: Any) -> Any:
+                """Wrapper function to adapt CrewAI tool calls to OpenAI format.
+
+                Args:
+                    context_wrapper: OpenAI context wrapper.
+                    arguments: Tool arguments from OpenAI.
+
+                Returns:
+                    Tool execution result.
+                """
                 # Get the parameter name from the schema
-                param_name = list(
-                    tool.args_schema.model_json_schema()["properties"].keys()
-                )[0]
+                param_name: str = next(
+                    iter(tool.args_schema.model_json_schema()["properties"].keys())
+                )

                 # Handle different argument types
+                args_dict: dict[str, Any]
                 if isinstance(arguments, dict):
                     args_dict = arguments
                 elif isinstance(arguments, str):
                     try:
-                        import json
-
                         args_dict = json.loads(arguments)
                     except json.JSONDecodeError:
                         args_dict = {param_name: arguments}
@@ -59,11 +127,11 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
                         args_dict = {param_name: str(arguments)}

                 # Run the tool with the processed arguments
-                output = tool._run(**args_dict)
+                output: Any | Awaitable[Any] = tool._run(**args_dict)

                 # Await if the tool returned a coroutine
                 if inspect.isawaitable(output):
-                    result = await output
+                    result: Any = await output
                 else:
                     result = output

@@ -74,17 +142,20 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):

             return wrapper

-        openai_tools = []
+        openai_tools: list[OpenAITool] = []
         for tool in tools:
-            schema = tool.args_schema.model_json_schema()
+            schema: dict[str, Any] = tool.args_schema.model_json_schema()

             schema.update({"additionalProperties": False, "type": "object"})

-            openai_tool = FunctionTool(
+            openai_tool: OpenAIFunctionTool = cast(
+                OpenAIFunctionTool,
+                FunctionTool(
                 name=sanitize_tool_name(tool.name),
                 description=tool.description,
                 params_json_schema=schema,
                 on_invoke_tool=create_tool_wrapper(tool),
+                ),
             )
             openai_tools.append(openai_tool)

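The async wrapper above normalizes whatever the OpenAI runtime hands over (a dict, a JSON string, or a bare value) into keyword arguments for the CrewAI tool. A standalone sketch of that normalization follows; the helper name and parameter are hypothetical, added only for illustration.

import json
from typing import Any


def normalize_arguments(arguments: Any, first_param: str) -> dict[str, Any]:
    # Dicts pass through, JSON strings are parsed, and anything else is
    # bound to the tool's first declared parameter, mirroring the wrapper.
    if isinstance(arguments, dict):
        return arguments
    if isinstance(arguments, str):
        try:
            return json.loads(arguments)
        except json.JSONDecodeError:
            return {first_param: arguments}
    return {first_param: str(arguments)}


assert normalize_arguments('{"query": "crewai"}', "query") == {"query": "crewai"}
assert normalize_arguments("plain text", "query") == {"query": "plain text"}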
src/crewai/agents/agent_adapters/openai_agents/protocols.py (new file, 74 lines)
@@ -0,0 +1,74 @@
+"""Type protocols for OpenAI agents modules."""
+
+from collections.abc import Callable
+from typing import Any, Protocol, TypedDict, runtime_checkable
+
+from crewai.tools.base_tool import BaseTool
+
+
+class AgentKwargs(TypedDict, total=False):
+    """Typed dict for agent initialization kwargs."""
+
+    role: str
+    goal: str
+    backstory: str
+    model: str
+    tools: list[BaseTool] | None
+    agent_config: dict[str, Any] | None
+
+
+@runtime_checkable
+class OpenAIAgent(Protocol):
+    """Protocol for OpenAI Agent."""
+
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        **kwargs: Any,
+    ) -> None:
+        """Initialize the OpenAI agent."""
+        ...
+
+    tools: list[Any]
+    output_type: Any
+
+
+@runtime_checkable
+class OpenAIRunner(Protocol):
+    """Protocol for OpenAI Runner."""
+
+    @classmethod
+    def run_sync(cls, agent: OpenAIAgent, message: str) -> Any:
+        """Run agent synchronously with a message."""
+        ...
+
+
+@runtime_checkable
+class OpenAIAgentsModule(Protocol):
+    """Protocol for OpenAI agents module."""
+
+    Agent: type[OpenAIAgent]
+    Runner: type[OpenAIRunner]
+    enable_verbose_stdout_logging: Callable[[], None]
+
+
+@runtime_checkable
+class OpenAITool(Protocol):
+    """Protocol for OpenAI Tool."""
+
+
+@runtime_checkable
+class OpenAIFunctionTool(Protocol):
+    """Protocol for OpenAI FunctionTool."""
+
+    def __init__(
+        self,
+        name: str,
+        description: str,
+        params_json_schema: dict[str, Any],
+        on_invoke_tool: Any,
+    ) -> None:
+        """Initialize the function tool."""
+        ...
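Because these protocols are decorated with @runtime_checkable, isinstance() can structurally verify the dynamically imported objects. A small illustration with a hypothetical stub follows (not part of the commit); note that runtime protocol checks only confirm member names, not signatures.

from typing import Any, Protocol, runtime_checkable


@runtime_checkable
class SupportsRunSync(Protocol):
    """Hypothetical protocol mirroring the OpenAIRunner surface."""

    def run_sync(self, agent: Any, message: str) -> Any: ...


class FakeRunner:
    def run_sync(self, agent: Any, message: str) -> Any:
        return f"ran: {message}"


# Structural check: FakeRunner never inherits from the protocol.
assert isinstance(FakeRunner(), SupportsRunSync)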
src/crewai/agents/agent_adapters/openai_agents/structured_output_converter.py
@@ -1,5 +1,12 @@
+"""OpenAI structured output converter for CrewAI task integration.
+
+This module contains the OpenAIConverterAdapter class that handles structured
+output conversion for OpenAI agents, supporting JSON and Pydantic model formats.
+"""
+
 import json
 import re
+from typing import Any, Literal

 from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
 from crewai.utilities.converter import generate_model_description
@@ -7,8 +14,7 @@ from crewai.utilities.i18n import I18N


 class OpenAIConverterAdapter(BaseConverterAdapter):
-    """
-    Adapter for handling structured output conversion in OpenAI agents.
+    """Adapter for handling structured output conversion in OpenAI agents.

     This adapter enhances the OpenAI agent to handle structured output formats
     and post-processes the results when needed.
@@ -19,19 +25,23 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
         _output_model: The Pydantic model for the output
     """

-    def __init__(self, agent_adapter):
-        """Initialize the converter adapter with a reference to the agent adapter"""
-        self.agent_adapter = agent_adapter
-        self._output_format = None
-        self._schema = None
-        self._output_model = None
-
-    def configure_structured_output(self, task) -> None:
-        """
-        Configure the structured output for OpenAI agent based on task requirements.
+    def __init__(self, agent_adapter: Any) -> None:
+        """Initialize the converter adapter with a reference to the agent adapter.
+
+        Args:
+            agent_adapter: The OpenAI agent adapter instance.
+        """
+        super().__init__(agent_adapter=agent_adapter)
+        self.agent_adapter: Any = agent_adapter
+        self._output_format: Literal["json", "pydantic"] | None = None
+        self._schema: str | None = None
+        self._output_model: Any = None
+
+    def configure_structured_output(self, task: Any) -> None:
+        """Configure the structured output for OpenAI agent based on task requirements.

         Args:
-            task: The task containing output format requirements
+            task: The task containing output format requirements.
         """
         # Reset configuration
         self._output_format = None
@@ -55,19 +65,18 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
             self._output_model = task.output_pydantic

     def enhance_system_prompt(self, base_prompt: str) -> str:
-        """
-        Enhance the base system prompt with structured output requirements if needed.
+        """Enhance the base system prompt with structured output requirements if needed.

         Args:
-            base_prompt: The original system prompt
+            base_prompt: The original system prompt.

         Returns:
-            Enhanced system prompt with output format instructions if needed
+            Enhanced system prompt with output format instructions if needed.
         """
         if not self._output_format:
             return base_prompt

-        output_schema = (
+        output_schema: str = (
             I18N()
             .slice("formatted_task_instructions")
             .format(output_format=self._schema)
@@ -76,16 +85,15 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
         return f"{base_prompt}\n\n{output_schema}"

     def post_process_result(self, result: str) -> str:
-        """
-        Post-process the result to ensure it matches the expected format.
+        """Post-process the result to ensure it matches the expected format.

         This method attempts to extract valid JSON from the result if necessary.

         Args:
-            result: The raw result from the agent
+            result: The raw result from the agent.

         Returns:
-            Processed result conforming to the expected output format
+            Processed result conforming to the expected output format.
         """
         if not self._output_format:
             return result
@@ -97,26 +105,30 @@ class OpenAIConverterAdapter(BaseConverterAdapter):
             return result
         except json.JSONDecodeError:
             # Try to extract JSON from markdown code blocks
-            code_block_pattern = r"```(?:json)?\s*([\s\S]*?)```"
-            code_blocks = re.findall(code_block_pattern, result)
+            code_block_pattern: str = r"```(?:json)?\s*([\s\S]*?)```"
+            code_blocks: list[str] = re.findall(code_block_pattern, result)

             for block in code_blocks:
+                stripped_block = block.strip()
                 try:
-                    json.loads(block.strip())
-                    return block.strip()
+                    json.loads(stripped_block)
+                    return stripped_block
                 except json.JSONDecodeError:
-                    continue
+                    pass

             # Try to extract any JSON-like structure
-            json_pattern = r"(\{[\s\S]*\})"
-            json_matches = re.findall(json_pattern, result, re.DOTALL)
+            json_pattern: str = r"(\{[\s\S]*\})"
+            json_matches: list[str] = re.findall(json_pattern, result, re.DOTALL)

             for match in json_matches:
+                is_valid = True
                 try:
                     json.loads(match)
-                    return match
                 except json.JSONDecodeError:
-                    continue
+                    is_valid = False
+
+                if is_valid:
+                    return match

             # If all extraction attempts fail, return the original
             return str(result)
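To make the new extraction path concrete, here is a minimal standalone sketch of what post_process_result does with a markdown-fenced reply; the helper name is hypothetical, and the fallback to the raw text mirrors the adapter's behavior.

import json
import re


def extract_json(result: str) -> str:
    # Return the first valid JSON block found in a markdown code fence,
    # falling back to the raw text when nothing parses.
    for block in re.findall(r"```(?:json)?\s*([\s\S]*?)```", result):
        candidate = block.strip()
        try:
            json.loads(candidate)
            return candidate
        except json.JSONDecodeError:
            pass
    return result


reply = 'Here you go:\n```json\n{"score": 9}\n```'
assert extract_json(reply) == '{"score": 9}'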