Compare commits

..

1 Commits

Author SHA1 Message Date
Devin AI
9af03058fe fix: skip signal handler registration in non-main thread
When CrewAI is initialized from a non-main thread (e.g., in Streamlit,
Flask, Django, Jupyter), the telemetry module was printing multiple
ValueError tracebacks for each signal handler registration attempt.

This fix adds a proactive main thread check in _register_shutdown_handlers()
before attempting signal registration. If not in the main thread, a debug
message is logged and signal handler registration is skipped.

Fixes #4289

Co-Authored-By: João <joao@crewai.com>
2026-01-27 19:45:52 +00:00
29 changed files with 1126 additions and 1791 deletions

View File

@@ -152,4 +152,4 @@ __all__ = [
"wrap_file_source", "wrap_file_source",
] ]
__version__ = "1.9.1" __version__ = "1.9.0"

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube~=15.0.0", "pytube~=15.0.0",
"requests~=2.32.5", "requests~=2.32.5",
"docker~=7.1.0", "docker~=7.1.0",
"crewai==1.9.1", "crewai==1.9.0",
"lancedb~=0.5.4", "lancedb~=0.5.4",
"tiktoken~=0.8.0", "tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4", "beautifulsoup4~=4.13.4",

View File

@@ -291,4 +291,4 @@ __all__ = [
"ZapierActionTools", "ZapierActionTools",
] ]
__version__ = "1.9.1" __version__ = "1.9.0"

View File

@@ -1,11 +1,10 @@
"""Crewai Enterprise Tools.""" """Crewai Enterprise Tools."""
import json
import os import os
from typing import Any import json
import re
from typing import Any, Optional, Union, cast, get_origin
from crewai.tools import BaseTool from crewai.tools import BaseTool
from crewai.utilities.pydantic_schema_utils import create_model_from_schema
from pydantic import Field, create_model from pydantic import Field, create_model
import requests import requests
@@ -15,6 +14,77 @@ from crewai_tools.tools.crewai_platform_tools.misc import (
) )
class AllOfSchemaAnalyzer:
"""Helper class to analyze and merge allOf schemas."""
def __init__(self, schemas: list[dict[str, Any]]):
self.schemas = schemas
self._explicit_types: list[str] = []
self._merged_properties: dict[str, Any] = {}
self._merged_required: list[str] = []
self._analyze_schemas()
def _analyze_schemas(self) -> None:
"""Analyze all schemas and extract relevant information."""
for schema in self.schemas:
if "type" in schema:
self._explicit_types.append(schema["type"])
# Merge object properties
if schema.get("type") == "object" and "properties" in schema:
self._merged_properties.update(schema["properties"])
if "required" in schema:
self._merged_required.extend(schema["required"])
def has_consistent_type(self) -> bool:
"""Check if all schemas have the same explicit type."""
return len(set(self._explicit_types)) == 1 if self._explicit_types else False
def get_consistent_type(self) -> type[Any]:
"""Get the consistent type if all schemas agree."""
if not self.has_consistent_type():
raise ValueError("No consistent type found")
type_mapping = {
"string": str,
"integer": int,
"number": float,
"boolean": bool,
"array": list,
"object": dict,
"null": type(None),
}
return type_mapping.get(self._explicit_types[0], str)
def has_object_schemas(self) -> bool:
"""Check if any schemas are object types with properties."""
return bool(self._merged_properties)
def get_merged_properties(self) -> dict[str, Any]:
"""Get merged properties from all object schemas."""
return self._merged_properties
def get_merged_required_fields(self) -> list[str]:
"""Get merged required fields from all object schemas."""
return list(set(self._merged_required)) # Remove duplicates
def get_fallback_type(self) -> type[Any]:
"""Get a fallback type when merging fails."""
if self._explicit_types:
# Use the first explicit type
type_mapping = {
"string": str,
"integer": int,
"number": float,
"boolean": bool,
"array": list,
"object": dict,
"null": type(None),
}
return type_mapping.get(self._explicit_types[0], str)
return str
class CrewAIPlatformActionTool(BaseTool): class CrewAIPlatformActionTool(BaseTool):
action_name: str = Field(default="", description="The name of the action") action_name: str = Field(default="", description="The name of the action")
action_schema: dict[str, Any] = Field( action_schema: dict[str, Any] = Field(
@@ -27,19 +97,42 @@ class CrewAIPlatformActionTool(BaseTool):
action_name: str, action_name: str,
action_schema: dict[str, Any], action_schema: dict[str, Any],
): ):
parameters = action_schema.get("function", {}).get("parameters", {}) self._model_registry: dict[str, type[Any]] = {}
self._base_name = self._sanitize_name(action_name)
schema_props, required = self._extract_schema_info(action_schema)
field_definitions: dict[str, Any] = {}
for param_name, param_details in schema_props.items():
param_desc = param_details.get("description", "")
is_required = param_name in required
if parameters and parameters.get("properties"):
try: try:
if "title" not in parameters: field_type = self._process_schema_type(
parameters = {**parameters, "title": f"{action_name}Schema"} param_details, self._sanitize_name(param_name).title()
if "type" not in parameters: )
parameters = {**parameters, "type": "object"}
args_schema = create_model_from_schema(parameters)
except Exception: except Exception:
args_schema = create_model(f"{action_name}Schema") field_type = str
field_definitions[param_name] = self._create_field_definition(
field_type, is_required, param_desc
)
if field_definitions:
try:
args_schema = create_model(
f"{self._base_name}Schema", **field_definitions
)
except Exception:
args_schema = create_model(
f"{self._base_name}Schema",
input_text=(str, Field(description="Input for the action")),
)
else: else:
args_schema = create_model(f"{action_name}Schema") args_schema = create_model(
f"{self._base_name}Schema",
input_text=(str, Field(description="Input for the action")),
)
super().__init__( super().__init__(
name=action_name.lower().replace(" ", "_"), name=action_name.lower().replace(" ", "_"),
@@ -49,12 +142,285 @@ class CrewAIPlatformActionTool(BaseTool):
self.action_name = action_name self.action_name = action_name
self.action_schema = action_schema self.action_schema = action_schema
def _run(self, **kwargs: Any) -> str: @staticmethod
def _sanitize_name(name: str) -> str:
name = name.lower().replace(" ", "_")
sanitized = re.sub(r"[^a-zA-Z0-9_]", "", name)
parts = sanitized.split("_")
return "".join(word.capitalize() for word in parts if word)
@staticmethod
def _extract_schema_info(
action_schema: dict[str, Any],
) -> tuple[dict[str, Any], list[str]]:
schema_props = (
action_schema.get("function", {})
.get("parameters", {})
.get("properties", {})
)
required = (
action_schema.get("function", {}).get("parameters", {}).get("required", [])
)
return schema_props, required
def _process_schema_type(self, schema: dict[str, Any], type_name: str) -> type[Any]:
"""
Process a JSON Schema type definition into a Python type.
Handles complex schema constructs like anyOf, oneOf, allOf, enums, arrays, and objects.
"""
# Handle composite schema types (anyOf, oneOf, allOf)
if composite_type := self._process_composite_schema(schema, type_name):
return composite_type
# Handle primitive types and simple constructs
return self._process_primitive_schema(schema, type_name)
def _process_composite_schema(
self, schema: dict[str, Any], type_name: str
) -> type[Any] | None:
"""Process composite schema types: anyOf, oneOf, allOf."""
if "anyOf" in schema:
return self._process_any_of_schema(schema["anyOf"], type_name)
if "oneOf" in schema:
return self._process_one_of_schema(schema["oneOf"], type_name)
if "allOf" in schema:
return self._process_all_of_schema(schema["allOf"], type_name)
return None
def _process_any_of_schema(
self, any_of_types: list[dict[str, Any]], type_name: str
) -> type[Any]:
"""Process anyOf schema - creates Union of possible types."""
is_nullable = any(t.get("type") == "null" for t in any_of_types)
non_null_types = [t for t in any_of_types if t.get("type") != "null"]
if not non_null_types:
return cast(
type[Any], cast(object, str | None)
) # fallback for only-null case
base_type = (
self._process_schema_type(non_null_types[0], type_name)
if len(non_null_types) == 1
else self._create_union_type(non_null_types, type_name, "AnyOf")
)
return base_type | None if is_nullable else base_type # type: ignore[return-value]
def _process_one_of_schema(
self, one_of_types: list[dict[str, Any]], type_name: str
) -> type[Any]:
"""Process oneOf schema - creates Union of mutually exclusive types."""
return (
self._process_schema_type(one_of_types[0], type_name)
if len(one_of_types) == 1
else self._create_union_type(one_of_types, type_name, "OneOf")
)
def _process_all_of_schema(
self, all_of_schemas: list[dict[str, Any]], type_name: str
) -> type[Any]:
"""Process allOf schema - merges schemas that must all be satisfied."""
if len(all_of_schemas) == 1:
return self._process_schema_type(all_of_schemas[0], type_name)
return self._merge_all_of_schemas(all_of_schemas, type_name)
def _create_union_type(
self, schemas: list[dict[str, Any]], type_name: str, prefix: str
) -> type[Any]:
"""Create a Union type from multiple schemas."""
return Union[ # type: ignore # noqa: UP007
tuple(
self._process_schema_type(schema, f"{type_name}{prefix}{i}")
for i, schema in enumerate(schemas)
)
]
def _process_primitive_schema(
self, schema: dict[str, Any], type_name: str
) -> type[Any]:
"""Process primitive schema types: string, number, array, object, etc."""
json_type = schema.get("type", "string")
if "enum" in schema:
return self._process_enum_schema(schema, json_type)
if json_type == "array":
return self._process_array_schema(schema, type_name)
if json_type == "object":
return self._create_nested_model(schema, type_name)
return self._map_json_type_to_python(json_type)
def _process_enum_schema(self, schema: dict[str, Any], json_type: str) -> type[Any]:
"""Process enum schema - currently falls back to base type."""
enum_values = schema["enum"]
if not enum_values:
return self._map_json_type_to_python(json_type)
# For Literal types, we need to pass the values directly, not as a tuple
# This is a workaround since we can't dynamically create Literal types easily
# Fall back to the base JSON type for now
return self._map_json_type_to_python(json_type)
def _process_array_schema(
self, schema: dict[str, Any], type_name: str
) -> type[Any]:
items_schema = schema.get("items", {"type": "string"})
item_type = self._process_schema_type(items_schema, f"{type_name}Item")
return list[item_type] # type: ignore
def _merge_all_of_schemas(
self, schemas: list[dict[str, Any]], type_name: str
) -> type[Any]:
schema_analyzer = AllOfSchemaAnalyzer(schemas)
if schema_analyzer.has_consistent_type():
return schema_analyzer.get_consistent_type()
if schema_analyzer.has_object_schemas():
return self._create_merged_object_model(
schema_analyzer.get_merged_properties(),
schema_analyzer.get_merged_required_fields(),
type_name,
)
return schema_analyzer.get_fallback_type()
def _create_merged_object_model(
self, properties: dict[str, Any], required: list[str], model_name: str
) -> type[Any]:
full_model_name = f"{self._base_name}{model_name}AllOf"
if full_model_name in self._model_registry:
return self._model_registry[full_model_name]
if not properties:
return dict
field_definitions = self._build_field_definitions(
properties, required, model_name
)
try:
merged_model = create_model(full_model_name, **field_definitions)
self._model_registry[full_model_name] = merged_model
return merged_model
except Exception:
return dict
def _build_field_definitions(
self, properties: dict[str, Any], required: list[str], model_name: str
) -> dict[str, Any]:
field_definitions = {}
for prop_name, prop_schema in properties.items():
prop_desc = prop_schema.get("description", "")
is_required = prop_name in required
try:
prop_type = self._process_schema_type(
prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}"
)
except Exception:
prop_type = str
field_definitions[prop_name] = self._create_field_definition(
prop_type, is_required, prop_desc
)
return field_definitions
def _create_nested_model(
self, schema: dict[str, Any], model_name: str
) -> type[Any]:
full_model_name = f"{self._base_name}{model_name}"
if full_model_name in self._model_registry:
return self._model_registry[full_model_name]
properties = schema.get("properties", {})
required_fields = schema.get("required", [])
if not properties:
return dict
field_definitions = {}
for prop_name, prop_schema in properties.items():
prop_desc = prop_schema.get("description", "")
is_required = prop_name in required_fields
try:
prop_type = self._process_schema_type(
prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}"
)
except Exception:
prop_type = str
field_definitions[prop_name] = self._create_field_definition(
prop_type, is_required, prop_desc
)
try:
nested_model = create_model(full_model_name, **field_definitions) # type: ignore
self._model_registry[full_model_name] = nested_model
return nested_model
except Exception:
return dict
def _create_field_definition(
self, field_type: type[Any], is_required: bool, description: str
) -> tuple:
if is_required:
return (field_type, Field(description=description))
if get_origin(field_type) is Union:
return (field_type, Field(default=None, description=description))
return (
Optional[field_type], # noqa: UP045
Field(default=None, description=description),
)
def _map_json_type_to_python(self, json_type: str) -> type[Any]:
type_mapping = {
"string": str,
"integer": int,
"number": float,
"boolean": bool,
"array": list,
"object": dict,
"null": type(None),
}
return type_mapping.get(json_type, str)
def _get_required_nullable_fields(self) -> list[str]:
schema_props, required = self._extract_schema_info(self.action_schema)
required_nullable_fields = []
for param_name in required:
param_details = schema_props.get(param_name, {})
if self._is_nullable_type(param_details):
required_nullable_fields.append(param_name)
return required_nullable_fields
def _is_nullable_type(self, schema: dict[str, Any]) -> bool:
if "anyOf" in schema:
return any(t.get("type") == "null" for t in schema["anyOf"])
return schema.get("type") == "null"
def _run(self, **kwargs) -> str:
try: try:
cleaned_kwargs = { cleaned_kwargs = {
key: value for key, value in kwargs.items() if value is not None key: value for key, value in kwargs.items() if value is not None
} }
required_nullable_fields = self._get_required_nullable_fields()
for field_name in required_nullable_fields:
if field_name not in cleaned_kwargs:
cleaned_kwargs[field_name] = None
api_url = ( api_url = (
f"{get_platform_api_base_url()}/actions/{self.action_name}/execute" f"{get_platform_api_base_url()}/actions/{self.action_name}/execute"
) )
@@ -63,9 +429,7 @@ class CrewAIPlatformActionTool(BaseTool):
"Authorization": f"Bearer {token}", "Authorization": f"Bearer {token}",
"Content-Type": "application/json", "Content-Type": "application/json",
} }
payload = { payload = cleaned_kwargs
"integration": cleaned_kwargs if cleaned_kwargs else {"_noop": True}
}
response = requests.post( response = requests.post(
url=api_url, url=api_url,
@@ -77,14 +441,7 @@ class CrewAIPlatformActionTool(BaseTool):
data = response.json() data = response.json()
if not response.ok: if not response.ok:
if isinstance(data, dict): error_message = data.get("error", {}).get("message", json.dumps(data))
error_info = data.get("error", {})
if isinstance(error_info, dict):
error_message = error_info.get("message", json.dumps(data))
else:
error_message = str(error_info)
else:
error_message = str(data)
return f"API request failed: {error_message}" return f"API request failed: {error_message}"
return json.dumps(data, indent=2) return json.dumps(data, indent=2)

View File

@@ -1,10 +1,5 @@
"""CrewAI platform tool builder for fetching and creating action tools."""
import logging
import os
from types import TracebackType
from typing import Any from typing import Any
import os
from crewai.tools import BaseTool from crewai.tools import BaseTool
import requests import requests
@@ -17,29 +12,22 @@ from crewai_tools.tools.crewai_platform_tools.misc import (
) )
logger = logging.getLogger(__name__)
class CrewaiPlatformToolBuilder: class CrewaiPlatformToolBuilder:
"""Builds platform tools from remote action schemas."""
def __init__( def __init__(
self, self,
apps: list[str], apps: list[str],
) -> None: ):
self._apps = apps self._apps = apps
self._actions_schema: dict[str, dict[str, Any]] = {} self._actions_schema = {} # type: ignore[var-annotated]
self._tools: list[BaseTool] | None = None self._tools = None
def tools(self) -> list[BaseTool]: def tools(self) -> list[BaseTool]:
"""Fetch actions and return built tools."""
if self._tools is None: if self._tools is None:
self._fetch_actions() self._fetch_actions()
self._create_tools() self._create_tools()
return self._tools if self._tools is not None else [] return self._tools if self._tools is not None else []
def _fetch_actions(self) -> None: def _fetch_actions(self):
"""Fetch action schemas from the platform API."""
actions_url = f"{get_platform_api_base_url()}/actions" actions_url = f"{get_platform_api_base_url()}/actions"
headers = {"Authorization": f"Bearer {get_platform_integration_token()}"} headers = {"Authorization": f"Bearer {get_platform_integration_token()}"}
@@ -52,8 +40,7 @@ class CrewaiPlatformToolBuilder:
verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true", verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
) )
response.raise_for_status() response.raise_for_status()
except Exception as e: except Exception:
logger.error(f"Failed to fetch platform tools for apps {self._apps}: {e}")
return return
raw_data = response.json() raw_data = response.json()
@@ -64,8 +51,6 @@ class CrewaiPlatformToolBuilder:
for app, action_list in action_categories.items(): for app, action_list in action_categories.items():
if isinstance(action_list, list): if isinstance(action_list, list):
for action in action_list: for action in action_list:
if not isinstance(action, dict):
continue
if action_name := action.get("name"): if action_name := action.get("name"):
action_schema = { action_schema = {
"function": { "function": {
@@ -79,16 +64,72 @@ class CrewaiPlatformToolBuilder:
} }
self._actions_schema[action_name] = action_schema self._actions_schema[action_name] = action_schema
def _create_tools(self) -> None: def _generate_detailed_description(
"""Create tool instances from fetched action schemas.""" self, schema: dict[str, Any], indent: int = 0
tools: list[BaseTool] = [] ) -> list[str]:
descriptions = []
indent_str = " " * indent
schema_type = schema.get("type", "string")
if schema_type == "object":
properties = schema.get("properties", {})
required_fields = schema.get("required", [])
if properties:
descriptions.append(f"{indent_str}Object with properties:")
for prop_name, prop_schema in properties.items():
prop_desc = prop_schema.get("description", "")
is_required = prop_name in required_fields
req_str = " (required)" if is_required else " (optional)"
descriptions.append(
f"{indent_str} - {prop_name}: {prop_desc}{req_str}"
)
if prop_schema.get("type") == "object":
descriptions.extend(
self._generate_detailed_description(prop_schema, indent + 2)
)
elif prop_schema.get("type") == "array":
items_schema = prop_schema.get("items", {})
if items_schema.get("type") == "object":
descriptions.append(f"{indent_str} Array of objects:")
descriptions.extend(
self._generate_detailed_description(
items_schema, indent + 3
)
)
elif "enum" in items_schema:
descriptions.append(
f"{indent_str} Array of enum values: {items_schema['enum']}"
)
elif "enum" in prop_schema:
descriptions.append(
f"{indent_str} Enum values: {prop_schema['enum']}"
)
return descriptions
def _create_tools(self):
tools = []
for action_name, action_schema in self._actions_schema.items(): for action_name, action_schema in self._actions_schema.items():
function_details = action_schema.get("function", {}) function_details = action_schema.get("function", {})
description = function_details.get("description", f"Execute {action_name}") description = function_details.get("description", f"Execute {action_name}")
parameters = function_details.get("parameters", {})
param_descriptions = []
if parameters.get("properties"):
param_descriptions.append("\nDetailed Parameter Structure:")
param_descriptions.extend(
self._generate_detailed_description(parameters)
)
full_description = description + "\n".join(param_descriptions)
tool = CrewAIPlatformActionTool( tool = CrewAIPlatformActionTool(
description=description, description=full_description,
action_name=action_name, action_name=action_name,
action_schema=action_schema, action_schema=action_schema,
) )
@@ -97,14 +138,8 @@ class CrewaiPlatformToolBuilder:
self._tools = tools self._tools = tools
def __enter__(self) -> list[BaseTool]: def __enter__(self):
"""Enter context manager and return tools."""
return self.tools() return self.tools()
def __exit__( def __exit__(self, exc_type, exc_val, exc_tb):
self, pass
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit context manager."""

View File

@@ -1,3 +1,4 @@
from typing import Union, get_args, get_origin
from unittest.mock import patch, Mock from unittest.mock import patch, Mock
import os import os
@@ -6,6 +7,251 @@ from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import
) )
class TestSchemaProcessing:
def setup_method(self):
self.base_action_schema = {
"function": {
"parameters": {
"properties": {},
"required": []
}
}
}
def create_test_tool(self, action_name="test_action"):
return CrewAIPlatformActionTool(
description="Test tool",
action_name=action_name,
action_schema=self.base_action_schema
)
def test_anyof_multiple_types(self):
tool = self.create_test_tool()
test_schema = {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "integer"}
]
}
result_type = tool._process_schema_type(test_schema, "TestField")
assert get_origin(result_type) is Union
args = get_args(result_type)
expected_types = (str, float, int)
for expected_type in expected_types:
assert expected_type in args
def test_anyof_with_null(self):
tool = self.create_test_tool()
test_schema = {
"anyOf": [
{"type": "string"},
{"type": "number"},
{"type": "null"}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldNullable")
assert get_origin(result_type) is Union
args = get_args(result_type)
assert type(None) in args
assert str in args
assert float in args
def test_anyof_single_type(self):
tool = self.create_test_tool()
test_schema = {
"anyOf": [
{"type": "string"}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldSingle")
assert result_type is str
def test_oneof_multiple_types(self):
tool = self.create_test_tool()
test_schema = {
"oneOf": [
{"type": "string"},
{"type": "boolean"}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldOneOf")
assert get_origin(result_type) is Union
args = get_args(result_type)
expected_types = (str, bool)
for expected_type in expected_types:
assert expected_type in args
def test_oneof_single_type(self):
tool = self.create_test_tool()
test_schema = {
"oneOf": [
{"type": "integer"}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldOneOfSingle")
assert result_type is int
def test_basic_types(self):
tool = self.create_test_tool()
test_cases = [
({"type": "string"}, str),
({"type": "integer"}, int),
({"type": "number"}, float),
({"type": "boolean"}, bool),
({"type": "array", "items": {"type": "string"}}, list),
]
for schema, expected_type in test_cases:
result_type = tool._process_schema_type(schema, "TestField")
if schema["type"] == "array":
assert get_origin(result_type) is list
else:
assert result_type is expected_type
def test_enum_handling(self):
tool = self.create_test_tool()
test_schema = {
"type": "string",
"enum": ["option1", "option2", "option3"]
}
result_type = tool._process_schema_type(test_schema, "TestFieldEnum")
assert result_type is str
def test_nested_anyof(self):
tool = self.create_test_tool()
test_schema = {
"anyOf": [
{"type": "string"},
{
"anyOf": [
{"type": "integer"},
{"type": "boolean"}
]
}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldNested")
assert get_origin(result_type) is Union
args = get_args(result_type)
assert str in args
if len(args) == 3:
assert int in args
assert bool in args
else:
nested_union = next(arg for arg in args if get_origin(arg) is Union)
nested_args = get_args(nested_union)
assert int in nested_args
assert bool in nested_args
def test_allof_same_types(self):
tool = self.create_test_tool()
test_schema = {
"allOf": [
{"type": "string"},
{"type": "string", "maxLength": 100}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfSame")
assert result_type is str
def test_allof_object_merge(self):
tool = self.create_test_tool()
test_schema = {
"allOf": [
{
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"}
},
"required": ["name"]
},
{
"type": "object",
"properties": {
"email": {"type": "string"},
"age": {"type": "integer"}
},
"required": ["email"]
}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfMerged")
# Should create a merged model with all properties
# The implementation might fall back to dict if model creation fails
# Let's just verify it's not a basic scalar type
assert result_type is not str
assert result_type is not int
assert result_type is not bool
# It could be dict (fallback) or a proper model class
assert result_type in (dict, type) or hasattr(result_type, '__name__')
def test_allof_single_schema(self):
"""Test that allOf with single schema works correctly."""
tool = self.create_test_tool()
test_schema = {
"allOf": [
{"type": "boolean"}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfSingle")
# Should be just bool
assert result_type is bool
def test_allof_mixed_types(self):
tool = self.create_test_tool()
test_schema = {
"allOf": [
{"type": "string"},
{"type": "integer"}
]
}
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfMixed")
assert result_type is str
class TestCrewAIPlatformActionToolVerify: class TestCrewAIPlatformActionToolVerify:
"""Test suite for SSL verification behavior based on CREWAI_FACTORY environment variable""" """Test suite for SSL verification behavior based on CREWAI_FACTORY environment variable"""

View File

@@ -224,6 +224,43 @@ class TestCrewaiPlatformToolBuilder(unittest.TestCase):
_, kwargs = mock_get.call_args _, kwargs = mock_get.call_args
assert kwargs["params"]["apps"] == "" assert kwargs["params"]["apps"] == ""
def test_detailed_description_generation(self):
builder = CrewaiPlatformToolBuilder(apps=["test"])
complex_schema = {
"type": "object",
"properties": {
"simple_string": {"type": "string", "description": "A simple string"},
"nested_object": {
"type": "object",
"properties": {
"inner_prop": {
"type": "integer",
"description": "Inner property",
}
},
"description": "Nested object",
},
"array_prop": {
"type": "array",
"items": {"type": "string"},
"description": "Array of strings",
},
},
}
descriptions = builder._generate_detailed_description(complex_schema)
assert isinstance(descriptions, list)
assert len(descriptions) > 0
description_text = "\n".join(descriptions)
assert "simple_string" in description_text
assert "nested_object" in description_text
assert "array_prop" in description_text
class TestCrewaiPlatformToolBuilderVerify(unittest.TestCase): class TestCrewaiPlatformToolBuilderVerify(unittest.TestCase):
"""Test suite for SSL verification behavior in CrewaiPlatformToolBuilder""" """Test suite for SSL verification behavior in CrewaiPlatformToolBuilder"""

View File

@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies] [project.optional-dependencies]
tools = [ tools = [
"crewai-tools==1.9.1", "crewai-tools==1.9.0",
] ]
embeddings = [ embeddings = [
"tiktoken~=0.8.0" "tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings() _suppress_pydantic_deprecation_warnings()
__version__ = "1.9.1" __version__ = "1.9.0"
_telemetry_submitted = False _telemetry_submitted = False

View File

@@ -28,11 +28,6 @@ from crewai.hooks.llm_hooks import (
get_after_llm_call_hooks, get_after_llm_call_hooks,
get_before_llm_call_hooks, get_before_llm_call_hooks,
) )
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
)
from crewai.utilities.agent_utils import ( from crewai.utilities.agent_utils import (
aget_llm_response, aget_llm_response,
convert_tools_to_openai_schema, convert_tools_to_openai_schema,
@@ -754,41 +749,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
track_delegation_if_needed(func_name, args_dict, self.task) track_delegation_if_needed(func_name, args_dict, self.task)
# Find the structured tool for hook context # Execute the tool (only if not cached and not at max usage)
structured_tool = None if not from_cache and not max_usage_reached:
for tool in self.tools or []:
if sanitize_tool_name(tool.name) == func_name:
structured_tool = tool
break
# Execute before_tool_call hooks
hook_blocked = False
before_hook_context = ToolCallHookContext(
tool_name=func_name,
tool_input=args_dict,
tool=structured_tool, # type: ignore[arg-type]
agent=self.agent,
task=self.task,
crew=self.crew,
)
before_hooks = get_before_tool_call_hooks()
try:
for hook in before_hooks:
hook_result = hook(before_hook_context)
if hook_result is False:
hook_blocked = True
break
except Exception as hook_error:
self._printer.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)
# If hook blocked execution, set result and skip tool execution
if hook_blocked:
result = f"Tool execution blocked by hook. Tool: {func_name}"
# Execute the tool (only if not cached, not at max usage, and not blocked by hook)
elif not from_cache and not max_usage_reached:
result = "Tool not found" result = "Tool not found"
if func_name in available_functions: if func_name in available_functions:
try: try:
@@ -836,28 +798,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
# Return error message when max usage limit is reached # Return error message when max usage limit is reached
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
after_hook_context = ToolCallHookContext(
tool_name=func_name,
tool_input=args_dict,
tool=structured_tool, # type: ignore[arg-type]
agent=self.agent,
task=self.task,
crew=self.crew,
tool_result=result,
)
after_hooks = get_after_tool_call_hooks()
try:
for after_hook in after_hooks:
hook_result = after_hook(after_hook_context)
if hook_result is not None:
result = hook_result
after_hook_context.tool_result = result
except Exception as hook_error:
self._printer.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)
# Emit tool usage finished event # Emit tool usage finished event
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.14"
dependencies = [ dependencies = [
"crewai[tools]==1.9.1" "crewai[tools]==1.9.0"
] ]
[project.scripts] [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14" requires-python = ">=3.10,<3.14"
dependencies = [ dependencies = [
"crewai[tools]==1.9.1" "crewai[tools]==1.9.0"
] ]
[project.scripts] [project.scripts]

View File

@@ -36,12 +36,6 @@ from crewai.hooks.llm_hooks import (
get_after_llm_call_hooks, get_after_llm_call_hooks,
get_before_llm_call_hooks, get_before_llm_call_hooks,
) )
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
)
from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
from crewai.utilities.agent_utils import ( from crewai.utilities.agent_utils import (
convert_tools_to_openai_schema, convert_tools_to_openai_schema,
enforce_rpm_limit, enforce_rpm_limit,
@@ -191,8 +185,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
self._instance_id = str(uuid4())[:8] self._instance_id = str(uuid4())[:8]
self.before_llm_call_hooks: list[BeforeLLMCallHookType] = [] self.before_llm_call_hooks: list[Callable] = []
self.after_llm_call_hooks: list[AfterLLMCallHookType] = [] self.after_llm_call_hooks: list[Callable] = []
self.before_llm_call_hooks.extend(get_before_llm_call_hooks()) self.before_llm_call_hooks.extend(get_before_llm_call_hooks())
self.after_llm_call_hooks.extend(get_after_llm_call_hooks()) self.after_llm_call_hooks.extend(get_after_llm_call_hooks())
@@ -305,21 +299,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
"""Compatibility property for mixin - returns state messages.""" """Compatibility property for mixin - returns state messages."""
return self._state.messages return self._state.messages
@messages.setter
def messages(self, value: list[LLMMessage]) -> None:
"""Set state messages."""
self._state.messages = value
@property @property
def iterations(self) -> int: def iterations(self) -> int:
"""Compatibility property for mixin - returns state iterations.""" """Compatibility property for mixin - returns state iterations."""
return self._state.iterations return self._state.iterations
@iterations.setter
def iterations(self, value: int) -> None:
"""Set state iterations."""
self._state.iterations = value
@start() @start()
def initialize_reasoning(self) -> Literal["initialized"]: def initialize_reasoning(self) -> Literal["initialized"]:
"""Initialize the reasoning flow and emit agent start logs.""" """Initialize the reasoning flow and emit agent start logs."""
@@ -593,12 +577,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
"content": None, "content": None,
"tool_calls": tool_calls_to_report, "tool_calls": tool_calls_to_report,
} }
if all(
type(tc).__qualname__ == "Part" for tc in self.state.pending_tool_calls
):
assistant_message["raw_tool_call_parts"] = list(
self.state.pending_tool_calls
)
self.state.messages.append(assistant_message) self.state.messages.append(assistant_message)
# Now execute each tool # Now execute each tool
@@ -633,12 +611,14 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
# Check if tool has reached max usage count # Check if tool has reached max usage count
max_usage_reached = False max_usage_reached = False
if ( if original_tool:
original_tool if (
and original_tool.max_usage_count is not None hasattr(original_tool, "max_usage_count")
and original_tool.current_usage_count >= original_tool.max_usage_count and original_tool.max_usage_count is not None
): and original_tool.current_usage_count
max_usage_reached = True >= original_tool.max_usage_count
):
max_usage_reached = True
# Check cache before executing # Check cache before executing
from_cache = False from_cache = False
@@ -670,37 +650,8 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
track_delegation_if_needed(func_name, args_dict, self.task) track_delegation_if_needed(func_name, args_dict, self.task)
structured_tool = None # Execute the tool (only if not cached and not at max usage)
for tool in self.tools or []: if not from_cache and not max_usage_reached:
if sanitize_tool_name(tool.name) == func_name:
structured_tool = tool
break
hook_blocked = False
before_hook_context = ToolCallHookContext(
tool_name=func_name,
tool_input=args_dict,
tool=structured_tool, # type: ignore[arg-type]
agent=self.agent,
task=self.task,
crew=self.crew,
)
before_hooks = get_before_tool_call_hooks()
try:
for hook in before_hooks:
hook_result = hook(before_hook_context)
if hook_result is False:
hook_blocked = True
break
except Exception as hook_error:
self._printer.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)
if hook_blocked:
result = f"Tool execution blocked by hook. Tool: {func_name}"
elif not from_cache and not max_usage_reached:
result = "Tool not found" result = "Tool not found"
if func_name in self._available_functions: if func_name in self._available_functions:
try: try:
@@ -710,7 +661,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
# Add to cache after successful execution (before string conversion) # Add to cache after successful execution (before string conversion)
if self.tools_handler and self.tools_handler.cache: if self.tools_handler and self.tools_handler.cache:
should_cache = True should_cache = True
if original_tool: if (
original_tool
and hasattr(original_tool, "cache_function")
and original_tool.cache_function
):
should_cache = original_tool.cache_function( should_cache = original_tool.cache_function(
args_dict, raw_result args_dict, raw_result
) )
@@ -741,33 +696,10 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
error=e, error=e,
), ),
) )
elif max_usage_reached and original_tool: elif max_usage_reached:
# Return error message when max usage limit is reached # Return error message when max usage limit is reached
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
# Execute after_tool_call hooks (even if blocked, to allow logging/monitoring)
after_hook_context = ToolCallHookContext(
tool_name=func_name,
tool_input=args_dict,
tool=structured_tool, # type: ignore[arg-type]
agent=self.agent,
task=self.task,
crew=self.crew,
tool_result=result,
)
after_hooks = get_after_tool_call_hooks()
try:
for after_hook in after_hooks:
hook_result = after_hook(after_hook_context)
if hook_result is not None:
result = hook_result
after_hook_context.tool_result = result
except Exception as hook_error:
self._printer.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)
# Emit tool usage finished event # Emit tool usage finished event
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
@@ -901,10 +833,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
@listen("parser_error") @listen("parser_error")
def recover_from_parser_error(self) -> Literal["initialized"]: def recover_from_parser_error(self) -> Literal["initialized"]:
"""Recover from output parser errors and retry.""" """Recover from output parser errors and retry."""
if not self._last_parser_error:
self.state.iterations += 1
return "initialized"
formatted_answer = handle_output_parser_exception( formatted_answer = handle_output_parser_exception(
e=self._last_parser_error, e=self._last_parser_error,
messages=list(self.state.messages), messages=list(self.state.messages),

View File

@@ -9,7 +9,6 @@ from crewai.utilities.printer import Printer
if TYPE_CHECKING: if TYPE_CHECKING:
from crewai.agents.crew_agent_executor import CrewAgentExecutor from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.experimental.agent_executor import AgentExecutor
from crewai.lite_agent import LiteAgent from crewai.lite_agent import LiteAgent
from crewai.llms.base_llm import BaseLLM from crewai.llms.base_llm import BaseLLM
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
@@ -42,7 +41,7 @@ class LLMCallHookContext:
Can be modified by returning a new string from after_llm_call hook. Can be modified by returning a new string from after_llm_call hook.
""" """
executor: CrewAgentExecutor | AgentExecutor | LiteAgent | None executor: CrewAgentExecutor | LiteAgent | None
messages: list[LLMMessage] messages: list[LLMMessage]
agent: Any agent: Any
task: Any task: Any
@@ -53,7 +52,7 @@ class LLMCallHookContext:
def __init__( def __init__(
self, self,
executor: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None, executor: CrewAgentExecutor | LiteAgent | None = None,
response: str | None = None, response: str | None = None,
messages: list[LLMMessage] | None = None, messages: list[LLMMessage] | None = None,
llm: BaseLLM | str | Any | None = None, # TODO: look into llm: BaseLLM | str | Any | None = None, # TODO: look into

View File

@@ -16,7 +16,6 @@ from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import ( from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError, LLMContextLengthExceededError,
) )
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
@@ -549,11 +548,7 @@ class BedrockCompletion(BaseLLM):
"toolSpec": { "toolSpec": {
"name": "structured_output", "name": "structured_output",
"description": "Returns structured data according to the schema", "description": "Returns structured data according to the schema",
"inputSchema": { "inputSchema": {"json": response_model.model_json_schema()},
"json": generate_model_description(response_model)
.get("json_schema", {})
.get("schema", {})
},
} }
} }
body["toolConfig"] = cast( body["toolConfig"] = cast(
@@ -784,11 +779,7 @@ class BedrockCompletion(BaseLLM):
"toolSpec": { "toolSpec": {
"name": "structured_output", "name": "structured_output",
"description": "Returns structured data according to the schema", "description": "Returns structured data according to the schema",
"inputSchema": { "inputSchema": {"json": response_model.model_json_schema()},
"json": generate_model_description(response_model)
.get("json_schema", {})
.get("schema", {})
},
} }
} }
body["toolConfig"] = cast( body["toolConfig"] = cast(
@@ -1020,11 +1011,7 @@ class BedrockCompletion(BaseLLM):
"toolSpec": { "toolSpec": {
"name": "structured_output", "name": "structured_output",
"description": "Returns structured data according to the schema", "description": "Returns structured data according to the schema",
"inputSchema": { "inputSchema": {"json": response_model.model_json_schema()},
"json": generate_model_description(response_model)
.get("json_schema", {})
.get("schema", {})
},
} }
} }
body["toolConfig"] = cast( body["toolConfig"] = cast(
@@ -1236,11 +1223,7 @@ class BedrockCompletion(BaseLLM):
"toolSpec": { "toolSpec": {
"name": "structured_output", "name": "structured_output",
"description": "Returns structured data according to the schema", "description": "Returns structured data according to the schema",
"inputSchema": { "inputSchema": {"json": response_model.model_json_schema()},
"json": generate_model_description(response_model)
.get("json_schema", {})
.get("schema", {})
},
} }
} }
body["toolConfig"] = cast( body["toolConfig"] = cast(

View File

@@ -15,7 +15,6 @@ from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import ( from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError, LLMContextLengthExceededError,
) )
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
@@ -465,10 +464,7 @@ class GeminiCompletion(BaseLLM):
if response_model: if response_model:
config_params["response_mime_type"] = "application/json" config_params["response_mime_type"] = "application/json"
schema_output = generate_model_description(response_model) config_params["response_schema"] = response_model.model_json_schema()
config_params["response_schema"] = schema_output.get("json_schema", {}).get(
"schema", {}
)
# Handle tools for supported models # Handle tools for supported models
if tools and self.supports_tools: if tools and self.supports_tools:
@@ -493,7 +489,7 @@ class GeminiCompletion(BaseLLM):
function_declaration = types.FunctionDeclaration( function_declaration = types.FunctionDeclaration(
name=name, name=name,
description=description, description=description,
parameters_json_schema=parameters if parameters else None, parameters=parameters if parameters else None,
) )
gemini_tool = types.Tool(function_declarations=[function_declaration]) gemini_tool = types.Tool(function_declarations=[function_declaration])
@@ -547,10 +543,11 @@ class GeminiCompletion(BaseLLM):
else: else:
parts.append(types.Part.from_text(text=str(content) if content else "")) parts.append(types.Part.from_text(text=str(content) if content else ""))
text_content: str = " ".join(p.text for p in parts if p.text is not None)
if role == "system": if role == "system":
# Extract system instruction - Gemini handles it separately # Extract system instruction - Gemini handles it separately
text_content = " ".join(
p.text for p in parts if hasattr(p, "text") and p.text
)
if system_instruction: if system_instruction:
system_instruction += f"\n\n{text_content}" system_instruction += f"\n\n{text_content}"
else: else:
@@ -579,40 +576,31 @@ class GeminiCompletion(BaseLLM):
types.Content(role="user", parts=[function_response_part]) types.Content(role="user", parts=[function_response_part])
) )
elif role == "assistant" and message.get("tool_calls"): elif role == "assistant" and message.get("tool_calls"):
raw_parts: list[Any] | None = message.get("raw_tool_call_parts") tool_parts: list[types.Part] = []
if raw_parts and all(isinstance(p, types.Part) for p in raw_parts):
tool_parts: list[types.Part] = list(raw_parts)
if text_content:
tool_parts.insert(0, types.Part.from_text(text=text_content))
else:
tool_parts = []
if text_content:
tool_parts.append(types.Part.from_text(text=text_content))
tool_calls: list[dict[str, Any]] = message.get("tool_calls") or [] if text_content:
for tool_call in tool_calls: tool_parts.append(types.Part.from_text(text=text_content))
func: dict[str, Any] = tool_call.get("function") or {}
func_name: str = str(func.get("name") or "")
func_args_raw: str | dict[str, Any] = (
func.get("arguments") or {}
)
func_args: dict[str, Any] tool_calls: list[dict[str, Any]] = message.get("tool_calls") or []
if isinstance(func_args_raw, str): for tool_call in tool_calls:
try: func: dict[str, Any] = tool_call.get("function") or {}
func_args = ( func_name: str = str(func.get("name") or "")
json.loads(func_args_raw) if func_args_raw else {} func_args_raw: str | dict[str, Any] = func.get("arguments") or {}
)
except (json.JSONDecodeError, TypeError):
func_args = {}
else:
func_args = func_args_raw
tool_parts.append( func_args: dict[str, Any]
types.Part.from_function_call( if isinstance(func_args_raw, str):
name=func_name, args=func_args try:
func_args = (
json.loads(func_args_raw) if func_args_raw else {}
) )
) except (json.JSONDecodeError, TypeError):
func_args = {}
else:
func_args = func_args_raw
tool_parts.append(
types.Part.from_function_call(name=func_name, args=func_args)
)
contents.append(types.Content(role="model", parts=tool_parts)) contents.append(types.Content(role="model", parts=tool_parts))
else: else:

View File

@@ -693,14 +693,14 @@ class OpenAICompletion(BaseLLM):
if response_model or self.response_format: if response_model or self.response_format:
format_model = response_model or self.response_format format_model = response_model or self.response_format
if isinstance(format_model, type) and issubclass(format_model, BaseModel): if isinstance(format_model, type) and issubclass(format_model, BaseModel):
schema_output = generate_model_description(format_model) schema = format_model.model_json_schema()
json_schema = schema_output.get("json_schema", {}) schema["additionalProperties"] = False
params["text"] = { params["text"] = {
"format": { "format": {
"type": "json_schema", "type": "json_schema",
"name": json_schema.get("name", format_model.__name__), "name": format_model.__name__,
"strict": json_schema.get("strict", True), "strict": True,
"schema": json_schema.get("schema", {}), "schema": schema,
} }
} }
elif isinstance(format_model, dict): elif isinstance(format_model, dict):
@@ -1060,7 +1060,7 @@ class OpenAICompletion(BaseLLM):
chunk=delta_text, chunk=delta_text,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
response_id=response_id_stream, response_id=response_id_stream
) )
elif event.type == "response.function_call_arguments.delta": elif event.type == "response.function_call_arguments.delta":
@@ -1709,7 +1709,7 @@ class OpenAICompletion(BaseLLM):
**parse_params, response_format=response_model **parse_params, response_format=response_model
) as stream: ) as stream:
for chunk in stream: for chunk in stream:
response_id_stream = chunk.id if hasattr(chunk, "id") else None response_id_stream=chunk.id if hasattr(chunk,"id") else None
if chunk.type == "content.delta": if chunk.type == "content.delta":
delta_content = chunk.delta delta_content = chunk.delta
@@ -1718,7 +1718,7 @@ class OpenAICompletion(BaseLLM):
chunk=delta_content, chunk=delta_content,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
response_id=response_id_stream, response_id=response_id_stream
) )
final_completion = stream.get_final_completion() final_completion = stream.get_final_completion()
@@ -1748,9 +1748,7 @@ class OpenAICompletion(BaseLLM):
usage_data = {"total_tokens": 0} usage_data = {"total_tokens": 0}
for completion_chunk in completion_stream: for completion_chunk in completion_stream:
response_id_stream = ( response_id_stream=completion_chunk.id if hasattr(completion_chunk,"id") else None
completion_chunk.id if hasattr(completion_chunk, "id") else None
)
if hasattr(completion_chunk, "usage") and completion_chunk.usage: if hasattr(completion_chunk, "usage") and completion_chunk.usage:
usage_data = self._extract_openai_token_usage(completion_chunk) usage_data = self._extract_openai_token_usage(completion_chunk)
@@ -1768,7 +1766,7 @@ class OpenAICompletion(BaseLLM):
chunk=chunk_delta.content, chunk=chunk_delta.content,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
response_id=response_id_stream, response_id=response_id_stream
) )
if chunk_delta.tool_calls: if chunk_delta.tool_calls:
@@ -1807,7 +1805,7 @@ class OpenAICompletion(BaseLLM):
"index": tool_calls[tool_index]["index"], "index": tool_calls[tool_index]["index"],
}, },
call_type=LLMCallType.TOOL_CALL, call_type=LLMCallType.TOOL_CALL,
response_id=response_id_stream, response_id=response_id_stream
) )
self._track_token_usage_internal(usage_data) self._track_token_usage_internal(usage_data)
@@ -2019,7 +2017,7 @@ class OpenAICompletion(BaseLLM):
accumulated_content = "" accumulated_content = ""
usage_data = {"total_tokens": 0} usage_data = {"total_tokens": 0}
async for chunk in completion_stream: async for chunk in completion_stream:
response_id_stream = chunk.id if hasattr(chunk, "id") else None response_id_stream=chunk.id if hasattr(chunk,"id") else None
if hasattr(chunk, "usage") and chunk.usage: if hasattr(chunk, "usage") and chunk.usage:
usage_data = self._extract_openai_token_usage(chunk) usage_data = self._extract_openai_token_usage(chunk)
@@ -2037,7 +2035,7 @@ class OpenAICompletion(BaseLLM):
chunk=delta.content, chunk=delta.content,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
response_id=response_id_stream, response_id=response_id_stream
) )
self._track_token_usage_internal(usage_data) self._track_token_usage_internal(usage_data)
@@ -2073,7 +2071,7 @@ class OpenAICompletion(BaseLLM):
usage_data = {"total_tokens": 0} usage_data = {"total_tokens": 0}
async for chunk in stream: async for chunk in stream:
response_id_stream = chunk.id if hasattr(chunk, "id") else None response_id_stream=chunk.id if hasattr(chunk,"id") else None
if hasattr(chunk, "usage") and chunk.usage: if hasattr(chunk, "usage") and chunk.usage:
usage_data = self._extract_openai_token_usage(chunk) usage_data = self._extract_openai_token_usage(chunk)
@@ -2091,7 +2089,7 @@ class OpenAICompletion(BaseLLM):
chunk=chunk_delta.content, chunk=chunk_delta.content,
from_task=from_task, from_task=from_task,
from_agent=from_agent, from_agent=from_agent,
response_id=response_id_stream, response_id=response_id_stream
) )
if chunk_delta.tool_calls: if chunk_delta.tool_calls:
@@ -2130,7 +2128,7 @@ class OpenAICompletion(BaseLLM):
"index": tool_calls[tool_index]["index"], "index": tool_calls[tool_index]["index"],
}, },
call_type=LLMCallType.TOOL_CALL, call_type=LLMCallType.TOOL_CALL,
response_id=response_id_stream, response_id=response_id_stream
) )
self._track_token_usage_internal(usage_data) self._track_token_usage_internal(usage_data)

View File

@@ -2,7 +2,6 @@ import logging
import re import re
from typing import Any from typing import Any
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.string_utils import sanitize_tool_name
@@ -78,8 +77,7 @@ def extract_tool_info(tool: dict[str, Any]) -> tuple[str, str, dict[str, Any]]:
# Also check for args_schema (Pydantic format) # Also check for args_schema (Pydantic format)
if not parameters and "args_schema" in tool: if not parameters and "args_schema" in tool:
if hasattr(tool["args_schema"], "model_json_schema"): if hasattr(tool["args_schema"], "model_json_schema"):
schema_output = generate_model_description(tool["args_schema"]) parameters = tool["args_schema"].model_json_schema()
parameters = schema_output.get("json_schema", {}).get("schema", {})
return name, description, parameters return name, description, parameters

View File

@@ -173,6 +173,13 @@ class Telemetry:
self._original_handlers: dict[int, Any] = {} self._original_handlers: dict[int, Any] = {}
if threading.current_thread() is not threading.main_thread():
logger.debug(
"CrewAI telemetry: Skipping signal handler registration "
"(not running in main thread)."
)
return
self._register_signal_handler(signal.SIGTERM, SigTermEvent, shutdown=True) self._register_signal_handler(signal.SIGTERM, SigTermEvent, shutdown=True)
self._register_signal_handler(signal.SIGINT, SigIntEvent, shutdown=True) self._register_signal_handler(signal.SIGINT, SigIntEvent, shutdown=True)
if hasattr(signal, "SIGHUP"): if hasattr(signal, "SIGHUP"):

View File

@@ -28,7 +28,6 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
) )
from crewai.utilities.i18n import I18N from crewai.utilities.i18n import I18N
from crewai.utilities.printer import ColoredText, Printer from crewai.utilities.printer import ColoredText, Printer
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.string_utils import sanitize_tool_name from crewai.utilities.string_utils import sanitize_tool_name
from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.types import LLMMessage from crewai.utilities.types import LLMMessage
@@ -37,7 +36,6 @@ from crewai.utilities.types import LLMMessage
if TYPE_CHECKING: if TYPE_CHECKING:
from crewai.agent import Agent from crewai.agent import Agent
from crewai.agents.crew_agent_executor import CrewAgentExecutor from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.experimental.agent_executor import AgentExecutor
from crewai.lite_agent import LiteAgent from crewai.lite_agent import LiteAgent
from crewai.llm import LLM from crewai.llm import LLM
from crewai.task import Task from crewai.task import Task
@@ -160,8 +158,7 @@ def convert_tools_to_openai_schema(
parameters: dict[str, Any] = {} parameters: dict[str, Any] = {}
if hasattr(tool, "args_schema") and tool.args_schema is not None: if hasattr(tool, "args_schema") and tool.args_schema is not None:
try: try:
schema_output = generate_model_description(tool.args_schema) parameters = tool.args_schema.model_json_schema()
parameters = schema_output.get("json_schema", {}).get("schema", {})
# Remove title and description from schema root as they're redundant # Remove title and description from schema root as they're redundant
parameters.pop("title", None) parameters.pop("title", None)
parameters.pop("description", None) parameters.pop("description", None)
@@ -321,7 +318,7 @@ def get_llm_response(
from_task: Task | None = None, from_task: Task | None = None,
from_agent: Agent | LiteAgent | None = None, from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None, response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None, executor_context: CrewAgentExecutor | LiteAgent | None = None,
) -> str | Any: ) -> str | Any:
"""Call the LLM and return the response, handling any invalid responses. """Call the LLM and return the response, handling any invalid responses.
@@ -383,7 +380,7 @@ async def aget_llm_response(
from_task: Task | None = None, from_task: Task | None = None,
from_agent: Agent | LiteAgent | None = None, from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None, response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | AgentExecutor | None = None, executor_context: CrewAgentExecutor | None = None,
) -> str | Any: ) -> str | Any:
"""Call the LLM asynchronously and return the response. """Call the LLM asynchronously and return the response.
@@ -903,8 +900,7 @@ def extract_tool_call_info(
def _setup_before_llm_call_hooks( def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None, executor_context: CrewAgentExecutor | LiteAgent | None, printer: Printer
printer: Printer,
) -> bool: ) -> bool:
"""Setup and invoke before_llm_call hooks for the executor context. """Setup and invoke before_llm_call hooks for the executor context.
@@ -954,7 +950,7 @@ def _setup_before_llm_call_hooks(
def _setup_after_llm_call_hooks( def _setup_after_llm_call_hooks(
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None, executor_context: CrewAgentExecutor | LiteAgent | None,
answer: str, answer: str,
printer: Printer, printer: Printer,
) -> str: ) -> str:

View File

@@ -1,72 +1,14 @@
"""Dynamic Pydantic model creation from JSON schemas. """Utilities for generating JSON schemas from Pydantic models.
This module provides utilities for converting JSON schemas to Pydantic models at runtime.
The main function is `create_model_from_schema`, which takes a JSON schema and returns
a dynamically created Pydantic model class.
This is used by the A2A server to honor response schemas sent by clients, allowing
structured output from agent tasks.
Based on dydantic (https://github.com/zenbase-ai/dydantic).
This module provides functions for converting Pydantic models to JSON schemas This module provides functions for converting Pydantic models to JSON schemas
suitable for use with LLMs and tool definitions. suitable for use with LLMs and tool definitions.
""" """
from __future__ import annotations
from collections.abc import Callable from collections.abc import Callable
from copy import deepcopy from copy import deepcopy
import datetime from typing import Any
import logging
from typing import TYPE_CHECKING, Annotated, Any, Literal, Union
import uuid
from pydantic import ( from pydantic import BaseModel
UUID1,
UUID3,
UUID4,
UUID5,
AnyUrl,
BaseModel,
ConfigDict,
DirectoryPath,
Field,
FilePath,
FileUrl,
HttpUrl,
Json,
MongoDsn,
NewPath,
PostgresDsn,
SecretBytes,
SecretStr,
StrictBytes,
create_model as create_model_base,
)
from pydantic.networks import ( # type: ignore[attr-defined]
IPv4Address,
IPv6Address,
IPvAnyAddress,
IPvAnyInterface,
IPvAnyNetwork,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from pydantic import EmailStr
from pydantic.main import AnyClassMethod
else:
try:
from pydantic import EmailStr
except ImportError:
logger.warning(
"EmailStr unavailable, using str fallback",
extra={"missing_package": "email_validator"},
)
EmailStr = str
def resolve_refs(schema: dict[str, Any]) -> dict[str, Any]: def resolve_refs(schema: dict[str, Any]) -> dict[str, Any]:
@@ -301,319 +243,3 @@ def generate_model_description(model: type[BaseModel]) -> dict[str, Any]:
"schema": json_schema, "schema": json_schema,
}, },
} }
FORMAT_TYPE_MAP: dict[str, type[Any]] = {
"base64": Annotated[bytes, Field(json_schema_extra={"format": "base64"})], # type: ignore[dict-item]
"binary": StrictBytes,
"date": datetime.date,
"time": datetime.time,
"date-time": datetime.datetime,
"duration": datetime.timedelta,
"directory-path": DirectoryPath,
"email": EmailStr,
"file-path": FilePath,
"ipv4": IPv4Address,
"ipv6": IPv6Address,
"ipvanyaddress": IPvAnyAddress, # type: ignore[dict-item]
"ipvanyinterface": IPvAnyInterface, # type: ignore[dict-item]
"ipvanynetwork": IPvAnyNetwork, # type: ignore[dict-item]
"json-string": Json,
"multi-host-uri": PostgresDsn | MongoDsn, # type: ignore[dict-item]
"password": SecretStr,
"path": NewPath,
"uri": AnyUrl,
"uuid": uuid.UUID,
"uuid1": UUID1,
"uuid3": UUID3,
"uuid4": UUID4,
"uuid5": UUID5,
}
def create_model_from_schema( # type: ignore[no-any-unimported]
json_schema: dict[str, Any],
*,
root_schema: dict[str, Any] | None = None,
__config__: ConfigDict | None = None,
__base__: type[BaseModel] | None = None,
__module__: str = __name__,
__validators__: dict[str, AnyClassMethod] | None = None,
__cls_kwargs__: dict[str, Any] | None = None,
) -> type[BaseModel]:
"""Create a Pydantic model from a JSON schema.
This function takes a JSON schema as input and dynamically creates a Pydantic
model class based on the schema. It supports various JSON schema features such
as nested objects, referenced definitions ($ref), arrays with typed items,
union types (anyOf/oneOf), and string formats.
Args:
json_schema: A dictionary representing the JSON schema.
root_schema: The root schema containing $defs. If not provided, the
current schema is treated as the root schema.
__config__: Pydantic configuration for the generated model.
__base__: Base class for the generated model. Defaults to BaseModel.
__module__: Module name for the generated model class.
__validators__: A dictionary of custom validators for the generated model.
__cls_kwargs__: Additional keyword arguments for the generated model class.
Returns:
A dynamically created Pydantic model class based on the provided JSON schema.
Example:
>>> schema = {
... "title": "Person",
... "type": "object",
... "properties": {
... "name": {"type": "string"},
... "age": {"type": "integer"},
... },
... "required": ["name"],
... }
>>> Person = create_model_from_schema(schema)
>>> person = Person(name="John", age=30)
>>> person.name
'John'
"""
effective_root = root_schema or json_schema
if "allOf" in json_schema:
json_schema = _merge_all_of_schemas(json_schema["allOf"], effective_root)
if "title" not in json_schema and "title" in (root_schema or {}):
json_schema["title"] = (root_schema or {}).get("title")
model_name = json_schema.get("title", "DynamicModel")
field_definitions = {
name: _json_schema_to_pydantic_field(
name, prop, json_schema.get("required", []), effective_root
)
for name, prop in (json_schema.get("properties", {}) or {}).items()
}
return create_model_base(
model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
**field_definitions,
)
def _json_schema_to_pydantic_field(
    name: str,
    json_schema: dict[str, Any],
    required: list[str],
    root_schema: dict[str, Any],
) -> Any:
    """Convert a JSON schema property to a Pydantic field definition.

    Resolves the property's base type, then layers on numeric bounds,
    string constraints, format-specific types (uri, password, ...), and
    metadata such as description and examples.

    Args:
        name: The field name.
        json_schema: The JSON schema for this field.
        required: List of required field names.
        root_schema: The root schema for resolving $ref.

    Returns:
        A tuple of (type, Field) for use with create_model.
    """
    type_ = _json_schema_to_pydantic_type(json_schema, root_schema, name_=name.title())
    description = json_schema.get("description")
    examples = json_schema.get("examples")
    is_required = name in required
    field_params: dict[str, Any] = {}
    schema_extra: dict[str, Any] = {}
    if description:
        field_params["description"] = description
    if examples:
        schema_extra["examples"] = examples
    default = ... if is_required else None
    # Numeric bound keywords only apply when the resolved type is numeric.
    if isinstance(type_, type) and issubclass(type_, (int, float)):
        if "minimum" in json_schema:
            field_params["ge"] = json_schema["minimum"]
        if "exclusiveMinimum" in json_schema:
            field_params["gt"] = json_schema["exclusiveMinimum"]
        if "maximum" in json_schema:
            field_params["le"] = json_schema["maximum"]
        if "exclusiveMaximum" in json_schema:
            field_params["lt"] = json_schema["exclusiveMaximum"]
        if "multipleOf" in json_schema:
            field_params["multiple_of"] = json_schema["multipleOf"]
    format_ = json_schema.get("format")
    if format_ in FORMAT_TYPE_MAP:
        pydantic_type = FORMAT_TYPE_MAP[format_]
        if format_ == "password":
            if json_schema.get("writeOnly"):
                pydantic_type = SecretBytes
        elif format_ == "uri":
            # NOTE(review): "scheme" is not a standard JSON Schema keyword —
            # presumably a project extension listing allowed URI schemes;
            # verify against callers.
            allowed_schemes = json_schema.get("scheme")
            if allowed_schemes:
                if len(allowed_schemes) == 1 and allowed_schemes[0] == "http":
                    pydantic_type = HttpUrl
                elif len(allowed_schemes) == 1 and allowed_schemes[0] == "file":
                    pydantic_type = FileUrl
        type_ = pydantic_type
    # String constraint keywords only apply when the resolved type is str-based.
    if isinstance(type_, type) and issubclass(type_, str):
        if "minLength" in json_schema:
            field_params["min_length"] = json_schema["minLength"]
        if "maxLength" in json_schema:
            field_params["max_length"] = json_schema["maxLength"]
        if "pattern" in json_schema:
            field_params["pattern"] = json_schema["pattern"]
    if not is_required:
        # Use typing.Union rather than the `|` operator: `type_` can be None
        # (a "null"-typed property), and `None | None` raises TypeError at
        # runtime, while Union[None, None] correctly collapses to NoneType.
        type_ = Union[type_, None]
    if schema_extra:
        field_params["json_schema_extra"] = schema_extra
    return type_, Field(default, **field_params)
def _resolve_ref(ref: str, root_schema: dict[str, Any]) -> dict[str, Any]:
    """Walk a JSON-pointer style $ref down to its target schema.

    Args:
        ref: The $ref string (e.g., "#/$defs/MyType").
        root_schema: The root schema containing $defs.

    Returns:
        The resolved schema dict.
    """
    from typing import cast

    # Refs rooted at "#/$defs/" start the walk inside the $defs table;
    # anything else walks from the root schema itself.
    if ref.startswith("#/$defs/"):
        current: dict[str, Any] = root_schema["$defs"]
        segments = ref.split("/")[2:]
    else:
        current = root_schema
        segments = ref.split("/")[1:]
    for segment in segments:
        current = cast(dict[str, Any], current[segment])
    return current
def _merge_all_of_schemas(
    schemas: list[dict[str, Any]],
    root_schema: dict[str, Any],
) -> dict[str, Any]:
    """Collapse an allOf list into a single object schema.

    Resolves any $ref entries, unions their properties, concatenates
    their required lists (deduplicated, order-preserving), and keeps
    the first title encountered.

    Args:
        schemas: List of schemas to merge.
        root_schema: The root schema for resolving $ref.

    Returns:
        Merged schema with combined properties and required fields.
    """
    properties: dict[str, Any] = {}
    required: list[str] = []
    combined: dict[str, Any] = {
        "type": "object",
        "properties": properties,
        "required": required,
    }
    for entry in schemas:
        if "$ref" in entry:
            entry = _resolve_ref(entry["$ref"], root_schema)
        properties.update(entry.get("properties", {}))
        for field_name in entry.get("required", []):
            if field_name not in required:
                required.append(field_name)
        if "title" in entry and "title" not in combined:
            combined["title"] = entry["title"]
    return combined
def _json_schema_to_pydantic_type(
json_schema: dict[str, Any],
root_schema: dict[str, Any],
*,
name_: str | None = None,
) -> Any:
"""Convert a JSON schema to a Python/Pydantic type.
Args:
json_schema: The JSON schema to convert.
root_schema: The root schema for resolving $ref.
name_: Optional name for nested models.
Returns:
A Python type corresponding to the JSON schema.
"""
ref = json_schema.get("$ref")
if ref:
ref_schema = _resolve_ref(ref, root_schema)
return _json_schema_to_pydantic_type(ref_schema, root_schema, name_=name_)
enum_values = json_schema.get("enum")
if enum_values:
return Literal[tuple(enum_values)]
if "const" in json_schema:
return Literal[json_schema["const"]]
any_of_schemas = []
if "anyOf" in json_schema or "oneOf" in json_schema:
any_of_schemas = json_schema.get("anyOf", []) + json_schema.get("oneOf", [])
if any_of_schemas:
any_of_types = [
_json_schema_to_pydantic_type(schema, root_schema)
for schema in any_of_schemas
]
return Union[tuple(any_of_types)] # noqa: UP007
all_of_schemas = json_schema.get("allOf")
if all_of_schemas:
if len(all_of_schemas) == 1:
return _json_schema_to_pydantic_type(
all_of_schemas[0], root_schema, name_=name_
)
merged = _merge_all_of_schemas(all_of_schemas, root_schema)
return _json_schema_to_pydantic_type(merged, root_schema, name_=name_)
type_ = json_schema.get("type")
if type_ == "string":
return str
if type_ == "integer":
return int
if type_ == "number":
return float
if type_ == "boolean":
return bool
if type_ == "array":
items_schema = json_schema.get("items")
if items_schema:
item_type = _json_schema_to_pydantic_type(
items_schema, root_schema, name_=name_
)
return list[item_type] # type: ignore[valid-type]
return list
if type_ == "object":
properties = json_schema.get("properties")
if properties:
json_schema_ = json_schema.copy()
if json_schema_.get("title") is None:
json_schema_["title"] = name_
return create_model_from_schema(json_schema_, root_schema=root_schema)
return dict
if type_ == "null":
return None
if type_ is None:
return Any
raise ValueError(f"Unsupported JSON schema type: {type_} from {json_schema}")

View File

@@ -26,5 +26,4 @@ class LLMMessage(TypedDict):
tool_call_id: NotRequired[str] tool_call_id: NotRequired[str]
name: NotRequired[str] name: NotRequired[str]
tool_calls: NotRequired[list[dict[str, Any]]] tool_calls: NotRequired[list[dict[str, Any]]]
raw_tool_call_parts: NotRequired[list[Any]]
files: NotRequired[dict[str, FileInput]] files: NotRequired[dict[str, FileInput]]

View File

@@ -1,224 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Calculator. You are a
calculator assistant\nYour personal goal is: Perform calculations"},{"role":"user","content":"\nCurrent
Task: What is 7 times 6? Use the multiply_numbers tool.\n\nThis is VERY important
to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"multiply_numbers","description":"Multiply
two numbers together.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '589'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2gblVDQeSH6tTrJiUtxgjoVoPuAR\",\n \"object\":
\"chat.completion\",\n \"created\": 1769532813,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_gO6PtjoOIDVeDWs7Wf680BHh\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"multiply_numbers\",\n
\ \"arguments\": \"{\\\"a\\\":7,\\\"b\\\":6}\"\n }\n
\ }\n ],\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 100,\n \"completion_tokens\":
18,\n \"total_tokens\": 118,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 16:53:34 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '593'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Calculator. You are a
calculator assistant\nYour personal goal is: Perform calculations"},{"role":"user","content":"\nCurrent
Task: What is 7 times 6? Use the multiply_numbers tool.\n\nThis is VERY important
to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_gO6PtjoOIDVeDWs7Wf680BHh","type":"function","function":{"name":"multiply_numbers","arguments":"{\"a\":7,\"b\":6}"}}]},{"role":"tool","tool_call_id":"call_gO6PtjoOIDVeDWs7Wf680BHh","name":"multiply_numbers","content":"42"},{"role":"user","content":"Analyze
the tool result. If requirements are met, provide the Final Answer. Otherwise,
call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"multiply_numbers","description":"Multiply
two numbers together.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1056'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2gbm9NaGCXkI3QwW3eOTFSP4L4lh\",\n \"object\":
\"chat.completion\",\n \"created\": 1769532814,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"42\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 162,\n \"completion_tokens\":
2,\n \"total_tokens\": 164,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 16:53:34 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '259'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,351 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a
test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent
Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is
the expected criteria for your final answer: The result of the operation\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nThis
is VERY important to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform
a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '773'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2giKEOxBDVqJVqVECwcFjbzdQKSA\",\n \"object\":
\"chat.completion\",\n \"created\": 1769533220,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_3OM1qS0QaWqhiJaHyJbNz1ME\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"dangerous_operation\",\n
\ \"arguments\": \"{\\\"action\\\":\\\"delete_all\\\"}\"\n }\n
\ }\n ],\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 133,\n \"completion_tokens\":
17,\n \"total_tokens\": 150,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 17:00:20 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '484'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a
test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent
Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is
the expected criteria for your final answer: The result of the operation\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nThis
is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","name":"dangerous_operation","content":"Tool
execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze
the tool result. If requirements are met, provide the Final Answer. Otherwise,
call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform
a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1311'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2giLnD91JxhK0yXninQ7oHYttNDY\",\n \"object\":
\"chat.completion\",\n \"created\": 1769533221,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_qF1c2e31GgjoSNJx0HBxI3zX\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"dangerous_operation\",\n
\ \"arguments\": \"{\\\"action\\\":\\\"delete_all\\\"}\"\n }\n
\ }\n ],\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 204,\n \"completion_tokens\":
17,\n \"total_tokens\": 221,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 17:00:21 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '447'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a
test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent
Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is
the expected criteria for your final answer: The result of the operation\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nThis
is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","name":"dangerous_operation","content":"Tool
execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze
the tool result. If requirements are met, provide the Final Answer. Otherwise,
call the next tool. Deliver only the answer without meta-commentary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_qF1c2e31GgjoSNJx0HBxI3zX","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_qF1c2e31GgjoSNJx0HBxI3zX","name":"dangerous_operation","content":"Tool
execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze
the tool result. If requirements are met, provide the Final Answer. Otherwise,
call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform
a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1849'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2giM1tAvEOCNwDw1qNmNUN5PIg2Y\",\n \"object\":
\"chat.completion\",\n \"created\": 1769533222,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The dangerous_operation tool with action
'delete_all' was blocked and did not execute. There is no result from the
operation to provide.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 275,\n \"completion_tokens\":
28,\n \"total_tokens\": 303,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 17:00:22 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '636'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,230 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are
a math assistant that helps with division\nYour personal goal is: Perform division
calculations accurately"},{"role":"user","content":"\nCurrent Task: Calculate
100 divided by 4 using the divide_numbers tool.\n\nThis is the expected criteria
for your final answer: The result of the division\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nThis is VERY important
to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"divide_numbers","description":"Divide
first number by second number.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '809'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2gbkWUn8InDLeD1Cf8w0LxiUQOIS\",\n \"object\":
\"chat.completion\",\n \"created\": 1769532812,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
\ \"id\": \"call_gwIV3i71RNqfpr7KguEciCuV\",\n \"type\":
\"function\",\n \"function\": {\n \"name\": \"divide_numbers\",\n
\ \"arguments\": \"{\\\"a\\\":100,\\\"b\\\":4}\"\n }\n
\ }\n ],\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 140,\n \"completion_tokens\":
18,\n \"total_tokens\": 158,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 16:53:32 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '435'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are
a math assistant that helps with division\nYour personal goal is: Perform division
calculations accurately"},{"role":"user","content":"\nCurrent Task: Calculate
100 divided by 4 using the divide_numbers tool.\n\nThis is the expected criteria
for your final answer: The result of the division\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nThis is VERY important
to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_gwIV3i71RNqfpr7KguEciCuV","type":"function","function":{"name":"divide_numbers","arguments":"{\"a\":100,\"b\":4}"}}]},{"role":"tool","tool_call_id":"call_gwIV3i71RNqfpr7KguEciCuV","name":"divide_numbers","content":"25.0"},{"role":"user","content":"Analyze
the tool result. If requirements are met, provide the Final Answer. Otherwise,
call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"divide_numbers","description":"Divide
first number by second number.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1276'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D2gbkHw19D5oEBOhpZP5FR5MvRFgb\",\n \"object\":
\"chat.completion\",\n \"created\": 1769532812,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"25.0\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 204,\n \"completion_tokens\":
4,\n \"total_tokens\": 208,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 27 Jan 2026 16:53:33 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '523'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,22 +1,7 @@
interactions: interactions:
- request: - request:
body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
You are a helpful calculator assistant\nYour personal goal is: Help with math answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}'
calculations\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments:
{\n \"properties\": {\n \"a\": {\n \"title\": \"A\",\n \"type\":
\"integer\"\n },\n \"b\": {\n \"title\": \"B\",\n \"type\":
\"integer\"\n }\n },\n \"required\": [\n \"a\",\n \"b\"\n ],\n \"title\":
\"Calculate_Sum\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\nTool
Description: Add two numbers together.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [calculate_sum], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple JSON
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"},{"role":"user","content":"What
is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}'
headers: headers:
User-Agent: User-Agent:
- X-USER-AGENT-XXX - X-USER-AGENT-XXX
@@ -29,7 +14,7 @@ interactions:
connection: connection:
- keep-alive - keep-alive
content-length: content-length:
- '1356' - '1119'
content-type: content-type:
- application/json - application/json
host: host:
@@ -56,18 +41,8 @@ interactions:
uri: https://api.openai.com/v1/chat/completions uri: https://api.openai.com/v1/chat/completions
response: response:
body: body:
string: "{\n \"id\": \"chatcmpl-D2gSz7JfTi4NQ2QRTANg8Z2afJI8b\",\n \"object\": string: "{\n \"id\": \"chatcmpl-CiksV15hVLWURKZH4BxQEGjiCFWpz\",\n \"object\": \"chat.completion\",\n \"created\": 1764782667,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I should use the calculate_sum tool to add 5 and 3.\\nAction: calculate_sum\\nAction Input: {\\\"a\\\": 5, \\\"b\\\": 3}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 234,\n \"completion_tokens\": 40,\n \"total_tokens\": 274,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
\"chat.completion\",\n \"created\": 1769532269,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n : \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I need to use the calculate_sum
tool to find the sum of 5 and 3\\nAction: calculate_sum\\nAction Input: {\\\"a\\\":5,\\\"b\\\":3}\\n```\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
295,\n \"completion_tokens\": 41,\n \"total_tokens\": 336,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers: headers:
CF-RAY: CF-RAY:
- CF-RAY-XXX - CF-RAY-XXX
@@ -76,7 +51,7 @@ interactions:
Content-Type: Content-Type:
- application/json - application/json
Date: Date:
- Tue, 27 Jan 2026 16:44:30 GMT - Wed, 03 Dec 2025 17:24:28 GMT
Server: Server:
- cloudflare - cloudflare
Set-Cookie: Set-Cookie:
@@ -96,11 +71,13 @@ interactions:
openai-organization: openai-organization:
- OPENAI-ORG-XXX - OPENAI-ORG-XXX
openai-processing-ms: openai-processing-ms:
- '827' - '681'
openai-project: openai-project:
- OPENAI-PROJECT-XXX - OPENAI-PROJECT-XXX
openai-version: openai-version:
- '2020-10-01' - '2020-10-01'
x-envoy-upstream-service-time:
- '871'
x-openai-proxy-wasm: x-openai-proxy-wasm:
- v0.1 - v0.1
x-ratelimit-limit-requests: x-ratelimit-limit-requests:
@@ -121,25 +98,8 @@ interactions:
code: 200 code: 200
message: OK message: OK
- request: - request:
body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
You are a helpful calculator assistant\nYour personal goal is: Help with math answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: I should use the calculate_sum tool to add 5 and 3.\nAction: calculate_sum\nAction Input: {\"a\": 5, \"b\": 3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}'
calculations\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments:
{\n \"properties\": {\n \"a\": {\n \"title\": \"A\",\n \"type\":
\"integer\"\n },\n \"b\": {\n \"title\": \"B\",\n \"type\":
\"integer\"\n }\n },\n \"required\": [\n \"a\",\n \"b\"\n ],\n \"title\":
\"Calculate_Sum\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\nTool
Description: Add two numbers together.\n\nIMPORTANT: Use the following format
in your response:\n\n```\nThought: you should always think about what to do\nAction:
the action to take, only one name of [calculate_sum], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple JSON
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"},{"role":"user","content":"What
is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought:
I need to use the calculate_sum tool to find the sum of 5 and 3\nAction: calculate_sum\nAction
Input: {\"a\":5,\"b\":3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}'
headers: headers:
User-Agent: User-Agent:
- X-USER-AGENT-XXX - X-USER-AGENT-XXX
@@ -152,7 +112,7 @@ interactions:
connection: connection:
- keep-alive - keep-alive
content-length: content-length:
- '1544' - '1298'
content-type: content-type:
- application/json - application/json
cookie: cookie:
@@ -181,18 +141,7 @@ interactions:
uri: https://api.openai.com/v1/chat/completions uri: https://api.openai.com/v1/chat/completions
response: response:
body: body:
string: "{\n \"id\": \"chatcmpl-D2gT0RU66XqjAUOXnGmokD1Q8Fman\",\n \"object\": string: "{\n \"id\": \"chatcmpl-CiksWrVbyJFurKCm7XPRU1b1pT7qF\",\n \"object\": \"chat.completion\",\n \"created\": 1764782668,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 283,\n \"completion_tokens\": 18,\n \"total_tokens\": 301,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
\"chat.completion\",\n \"created\": 1769532270,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"```\\nThought: I now know the final
answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 345,\n \"completion_tokens\":
18,\n \"total_tokens\": 363,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers: headers:
CF-RAY: CF-RAY:
- CF-RAY-XXX - CF-RAY-XXX
@@ -201,7 +150,7 @@ interactions:
Content-Type: Content-Type:
- application/json - application/json
Date: Date:
- Tue, 27 Jan 2026 16:44:31 GMT - Wed, 03 Dec 2025 17:24:29 GMT
Server: Server:
- cloudflare - cloudflare
Strict-Transport-Security: Strict-Transport-Security:
@@ -219,11 +168,208 @@ interactions:
openai-organization: openai-organization:
- OPENAI-ORG-XXX - OPENAI-ORG-XXX
openai-processing-ms: openai-processing-ms:
- '606' - '427'
openai-project: openai-project:
- OPENAI-PROJECT-XXX - OPENAI-PROJECT-XXX
openai-version: openai-version:
- '2020-10-01' - '2020-10-01'
x-envoy-upstream-service-time:
- '442'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1119'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CimX8hwYiUUZijApUDk1yBMzTpBj9\",\n \"object\": \"chat.completion\",\n \"created\": 1764789030,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need to add 5 and 3 using the calculate_sum tool.\\nAction: calculate_sum\\nAction Input: {\\\"a\\\":5,\\\"b\\\":3}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 234,\n \"completion_tokens\": 37,\n \"total_tokens\": 271,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\
: \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 03 Dec 2025 19:10:33 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '2329'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2349'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final
answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: I need to add 5 and 3 using the calculate_sum tool.\nAction: calculate_sum\nAction Input: {\"a\":5,\"b\":3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1295'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CimXBrY5sdbr2pJnqGlazPTra4dor\",\n \"object\": \"chat.completion\",\n \"created\": 1764789033,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 280,\n \"completion_tokens\": 18,\n \"total_tokens\": 298,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 03 Dec 2025 19:10:35 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1647'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1694'
x-openai-proxy-wasm: x-openai-proxy-wasm:
- v0.1 - v0.1
x-ratelimit-limit-requests: x-ratelimit-limit-requests:

View File

@@ -590,233 +590,3 @@ class TestToolHooksIntegration:
# Clean up hooks # Clean up hooks
unregister_before_tool_call_hook(before_tool_call_hook) unregister_before_tool_call_hook(before_tool_call_hook)
unregister_after_tool_call_hook(after_tool_call_hook) unregister_after_tool_call_hook(after_tool_call_hook)
class TestNativeToolCallingHooksIntegration:
"""Integration tests for hooks with native function calling (Agent and Crew)."""
@pytest.mark.vcr()
def test_agent_native_tool_hooks_before_and_after(self):
"""Test that Agent with native tool calling executes before/after hooks."""
import os
from crewai import Agent
from crewai.tools import tool
hook_calls = {"before": [], "after": []}
@tool("multiply_numbers")
def multiply_numbers(a: int, b: int) -> int:
"""Multiply two numbers together."""
return a * b
def before_hook(context: ToolCallHookContext) -> bool | None:
hook_calls["before"].append({
"tool_name": context.tool_name,
"tool_input": dict(context.tool_input),
"has_agent": context.agent is not None,
})
return None
def after_hook(context: ToolCallHookContext) -> str | None:
hook_calls["after"].append({
"tool_name": context.tool_name,
"tool_result": context.tool_result,
"has_agent": context.agent is not None,
})
return None
register_before_tool_call_hook(before_hook)
register_after_tool_call_hook(after_hook)
try:
agent = Agent(
role="Calculator",
goal="Perform calculations",
backstory="You are a calculator assistant",
tools=[multiply_numbers],
verbose=True,
)
agent.kickoff(
messages="What is 7 times 6? Use the multiply_numbers tool."
)
# Verify before hook was called
assert len(hook_calls["before"]) > 0, "Before hook was never called"
before_call = hook_calls["before"][0]
assert before_call["tool_name"] == "multiply_numbers"
assert "a" in before_call["tool_input"]
assert "b" in before_call["tool_input"]
assert before_call["has_agent"] is True
# Verify after hook was called
assert len(hook_calls["after"]) > 0, "After hook was never called"
after_call = hook_calls["after"][0]
assert after_call["tool_name"] == "multiply_numbers"
assert "42" in str(after_call["tool_result"])
assert after_call["has_agent"] is True
finally:
unregister_before_tool_call_hook(before_hook)
unregister_after_tool_call_hook(after_hook)
@pytest.mark.vcr()
def test_crew_native_tool_hooks_before_and_after(self):
"""Test that Crew with Agent executes before/after hooks with full context."""
import os
from crewai import Agent, Crew, Task
from crewai.tools import tool
hook_calls = {"before": [], "after": []}
@tool("divide_numbers")
def divide_numbers(a: int, b: int) -> float:
"""Divide first number by second number."""
return a / b
def before_hook(context: ToolCallHookContext) -> bool | None:
hook_calls["before"].append({
"tool_name": context.tool_name,
"tool_input": dict(context.tool_input),
"has_agent": context.agent is not None,
"has_task": context.task is not None,
"has_crew": context.crew is not None,
"agent_role": context.agent.role if context.agent else None,
})
return None
def after_hook(context: ToolCallHookContext) -> str | None:
hook_calls["after"].append({
"tool_name": context.tool_name,
"tool_result": context.tool_result,
"has_agent": context.agent is not None,
"has_task": context.task is not None,
"has_crew": context.crew is not None,
})
return None
register_before_tool_call_hook(before_hook)
register_after_tool_call_hook(after_hook)
try:
agent = Agent(
role="Math Assistant",
goal="Perform division calculations accurately",
backstory="You are a math assistant that helps with division",
tools=[divide_numbers],
verbose=True,
)
task = Task(
description="Calculate 100 divided by 4 using the divide_numbers tool.",
expected_output="The result of the division",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
crew.kickoff()
# Verify before hook was called with full context
assert len(hook_calls["before"]) > 0, "Before hook was never called"
before_call = hook_calls["before"][0]
assert before_call["tool_name"] == "divide_numbers"
assert "a" in before_call["tool_input"]
assert "b" in before_call["tool_input"]
assert before_call["has_agent"] is True
assert before_call["has_task"] is True
assert before_call["has_crew"] is True
assert before_call["agent_role"] == "Math Assistant"
# Verify after hook was called with full context
assert len(hook_calls["after"]) > 0, "After hook was never called"
after_call = hook_calls["after"][0]
assert after_call["tool_name"] == "divide_numbers"
assert "25" in str(after_call["tool_result"])
assert after_call["has_agent"] is True
assert after_call["has_task"] is True
assert after_call["has_crew"] is True
finally:
unregister_before_tool_call_hook(before_hook)
unregister_after_tool_call_hook(after_hook)
@pytest.mark.vcr()
def test_before_hook_blocks_tool_execution_in_crew(self):
"""Test that returning False from before hook blocks tool execution."""
import os
from crewai import Agent, Crew, Task
from crewai.tools import tool
hook_calls = {"before": [], "after": [], "tool_executed": False}
@tool("dangerous_operation")
def dangerous_operation(action: str) -> str:
"""Perform a dangerous operation that should be blocked."""
hook_calls["tool_executed"] = True
return f"Executed: {action}"
def blocking_before_hook(context: ToolCallHookContext) -> bool | None:
hook_calls["before"].append({
"tool_name": context.tool_name,
"tool_input": dict(context.tool_input),
})
# Block all calls to dangerous_operation
if context.tool_name == "dangerous_operation":
return False
return None
def after_hook(context: ToolCallHookContext) -> str | None:
hook_calls["after"].append({
"tool_name": context.tool_name,
"tool_result": context.tool_result,
})
return None
register_before_tool_call_hook(blocking_before_hook)
register_after_tool_call_hook(after_hook)
try:
agent = Agent(
role="Test Agent",
goal="Try to use the dangerous operation tool",
backstory="You are a test agent",
tools=[dangerous_operation],
verbose=True,
)
task = Task(
description="Use the dangerous_operation tool with action 'delete_all'.",
expected_output="The result of the operation",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
crew.kickoff()
# Verify before hook was called
assert len(hook_calls["before"]) > 0, "Before hook was never called"
before_call = hook_calls["before"][0]
assert before_call["tool_name"] == "dangerous_operation"
# Verify the actual tool function was NOT executed
assert hook_calls["tool_executed"] is False, "Tool should have been blocked"
# Verify after hook was still called (with blocked message)
assert len(hook_calls["after"]) > 0, "After hook was never called"
after_call = hook_calls["after"][0]
assert "blocked" in after_call["tool_result"].lower()
finally:
unregister_before_tool_call_hook(blocking_before_hook)
unregister_after_tool_call_hook(after_hook)

View File

@@ -1,6 +1,6 @@
import os import os
import threading import threading
from unittest.mock import patch from unittest.mock import MagicMock, patch
import pytest import pytest
from crewai import Agent, Crew, Task from crewai import Agent, Crew, Task
@@ -121,3 +121,90 @@ def test_telemetry_singleton_pattern():
thread.join() thread.join()
assert all(instance is telemetry1 for instance in instances) assert all(instance is telemetry1 for instance in instances)
def test_signal_handler_registration_skipped_in_non_main_thread():
"""Test that signal handler registration is skipped when running from a non-main thread.
This test verifies that when Telemetry is initialized from a non-main thread,
the signal handler registration is skipped without raising noisy ValueError tracebacks.
See: https://github.com/crewAIInc/crewAI/issues/4289
"""
Telemetry._instance = None
result = {"register_signal_handler_called": False, "error": None}
def init_telemetry_in_thread():
try:
with patch("crewai.telemetry.telemetry.TracerProvider"):
with patch.object(
Telemetry,
"_register_signal_handler",
wraps=lambda *args, **kwargs: None,
) as mock_register:
telemetry = Telemetry()
result["register_signal_handler_called"] = mock_register.called
result["telemetry"] = telemetry
except Exception as e:
result["error"] = e
thread = threading.Thread(target=init_telemetry_in_thread)
thread.start()
thread.join()
assert result["error"] is None, f"Unexpected error: {result['error']}"
assert (
result["register_signal_handler_called"] is False
), "Signal handler should not be registered in non-main thread"
def test_signal_handler_registration_skipped_logs_debug_message():
"""Test that a debug message is logged when signal handler registration is skipped.
This test verifies that when Telemetry is initialized from a non-main thread,
a debug message is logged indicating that signal handler registration was skipped.
"""
Telemetry._instance = None
result = {"telemetry": None, "error": None, "debug_calls": []}
mock_logger_debug = MagicMock()
def init_telemetry_in_thread():
try:
with patch("crewai.telemetry.telemetry.TracerProvider"):
with patch(
"crewai.telemetry.telemetry.logger.debug", mock_logger_debug
):
result["telemetry"] = Telemetry()
result["debug_calls"] = [
str(call) for call in mock_logger_debug.call_args_list
]
except Exception as e:
result["error"] = e
thread = threading.Thread(target=init_telemetry_in_thread)
thread.start()
thread.join()
assert result["error"] is None, f"Unexpected error: {result['error']}"
assert result["telemetry"] is not None
debug_calls = result["debug_calls"]
assert any(
"Skipping signal handler registration" in call for call in debug_calls
), f"Expected debug message about skipping signal handler registration, got: {debug_calls}"
def test_signal_handlers_registered_in_main_thread():
"""Test that signal handlers are registered when running from the main thread."""
Telemetry._instance = None
with patch("crewai.telemetry.telemetry.TracerProvider"):
with patch(
"crewai.telemetry.telemetry.Telemetry._register_signal_handler"
) as mock_register:
telemetry = Telemetry()
assert telemetry.ready is True
assert mock_register.call_count >= 2

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools.""" """CrewAI development tools."""
__version__ = "1.9.1" __version__ = "1.9.0"