Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2026-01-07 15:18:29 +00:00

Compare commits: 11 commits (devin/1763 ... gl/chore/u)

| SHA1 |
|---|
| 2cd5a30873 |
| 02580f58d1 |
| 8b83bf3e54 |
| 93f1fbd75e |
| 0803318002 |
| 6fb13ee3e0 |
| 67e39073c7 |
| 722d316824 |
| a824d52e5e |
| d8fe83f76c |
| 46785adf58 |
@@ -402,77 +402,6 @@ crewai config reset

After resetting configuration, re-run `crewai login` to authenticate again.
</Tip>

### 14. Trace Management

Manage trace collection preferences for your Crew and Flow executions.

```shell Terminal
crewai traces [COMMAND]
```

#### Commands:

- `enable`: Enable trace collection for crew/flow executions
  ```shell Terminal
  crewai traces enable
  ```

- `disable`: Disable trace collection for crew/flow executions
  ```shell Terminal
  crewai traces disable
  ```

- `status`: Show current trace collection status
  ```shell Terminal
  crewai traces status
  ```

#### How Tracing Works

Trace collection is controlled by checking three settings in priority order:

1. **Explicit flag in code** (highest priority - can enable OR disable):
   ```python
   crew = Crew(agents=[...], tasks=[...], tracing=True)   # Always enable
   crew = Crew(agents=[...], tasks=[...], tracing=False)  # Always disable
   crew = Crew(agents=[...], tasks=[...])                 # Check lower priorities (default)
   ```
   - `tracing=True` will **always enable** tracing (overrides everything)
   - `tracing=False` will **always disable** tracing (overrides everything)
   - `tracing=None` or omitted will check lower priority settings

2. **Environment variable** (second priority):
   ```env
   CREWAI_TRACING_ENABLED=true
   ```
   - Checked only if `tracing` is not explicitly set to `True` or `False` in code
   - Set to `true` or `1` to enable tracing

3. **User preference** (lowest priority):
   ```shell Terminal
   crewai traces enable
   ```
   - Checked only if `tracing` is not set in code and `CREWAI_TRACING_ENABLED` is not set to `true`
   - Running `crewai traces enable` is sufficient to enable tracing by itself
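Putting the three levels together, the effective check reduces to a short resolution function. The sketch below is illustrative only; it mirrors the `should_enable_tracing` helper that appears in the source diff further down, with the saved user preference simplified to a plain argument:

```python
import os


def resolve_tracing(explicit: bool | None, user_consent: bool) -> bool:
    """Sketch of the priority order above; not the shipped implementation."""
    # 1. An explicit tracing=True/False in code always wins.
    if explicit is not None:
        return explicit
    # 2. Otherwise the CREWAI_TRACING_ENABLED environment variable is consulted.
    if os.getenv("CREWAI_TRACING_ENABLED", "").lower() in ("true", "1"):
        return True
    # 3. Finally, fall back to the preference saved by `crewai traces enable`.
    return user_consent
```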
<Note>
**To enable tracing**, use any one of these methods:
- Set `tracing=True` in your Crew/Flow code, OR
- Add `CREWAI_TRACING_ENABLED=true` to your `.env` file, OR
- Run `crewai traces enable`

**To disable tracing**, use any one of these methods:
- Set `tracing=False` in your Crew/Flow code (overrides everything), OR
- Remove the `CREWAI_TRACING_ENABLED` env var or set it to `false`, OR
- Run `crewai traces disable`

Higher-priority settings override lower ones.
</Note>

<Tip>
For more information about tracing, see the [Tracing documentation](/observability/tracing).
</Tip>

<Tip>
The CrewAI CLI handles authentication to the Tool Repository automatically when adding packages to your project. Just prepend `crewai` to any `uv` command, e.g. `crewai uv add requests`. For more information, see the [Tool Repository](https://docs.crewai.com/enterprise/features/tool-repository) docs.
</Tip>
@@ -1212,7 +1212,7 @@ Learn how to get the most out of your LLM configuration:

```python
import httpx
from crewai import LLM
from crewai.llms.hooks import BaseInterceptor
from crewai.llm.hooks import BaseInterceptor


class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Custom interceptor to modify requests and responses."""
```
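The one-line change above (`crewai.llms.hooks` to `crewai.llm.hooks`) is part of a module rename that recurs throughout this comparison. Code that must run on both sides of the rename could hedge with a fallback import; a minimal sketch, assuming only the import path (not the class itself) changed:

```python
try:
    # Newer layout used on one side of this comparison.
    from crewai.llm.hooks import BaseInterceptor
except ImportError:
    # Older layout (`crewai.llms`) used on the other side.
    from crewai.llms.hooks import BaseInterceptor
```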
@@ -12,7 +12,7 @@ dependencies = [
    "pytube>=15.0.0",
    "requests>=2.32.5",
    "docker>=7.1.0",
    "crewai==1.5.0",
    "crewai==1.4.1",
    "lancedb>=0.5.4",
    "tiktoken>=0.8.0",
    "beautifulsoup4>=4.13.4",
@@ -287,4 +287,4 @@ __all__ = [
    "ZapierActionTools",
]

__version__ = "1.5.0"
__version__ = "1.4.1"
@@ -48,7 +48,7 @@ Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = [
    "crewai-tools==1.5.0",
    "crewai-tools==1.4.1",
]
embeddings = [
    "tiktoken~=0.8.0"
@@ -8,8 +8,8 @@ from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import LLM
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.llm_guardrail import LLMGuardrail
@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

_suppress_pydantic_deprecation_warnings()

__version__ = "1.5.0"
__version__ = "1.4.1"
_telemetry_submitted = False
@@ -39,7 +39,7 @@ from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.lite_agent import LiteAgent
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.mcp import (
    MCPClient,
    MCPServerConfig,
@@ -633,7 +633,7 @@ class Agent(BaseAgent):
        )

        self.agent_executor = CrewAgentExecutor(
            llm=self.llm,
            llm=self.llm,  # type: ignore[arg-type]
            task=task,  # type: ignore[arg-type]
            agent=self,
            crew=self.crew,
@@ -810,6 +810,7 @@ class Agent(BaseAgent):
        from crewai.tools.base_tool import BaseTool
        from crewai.tools.mcp_native_tool import MCPNativeTool

        transport: StdioTransport | HTTPTransport | SSETransport
        if isinstance(mcp_config, MCPServerStdio):
            transport = StdioTransport(
                command=mcp_config.command,
@@ -903,10 +904,12 @@ class Agent(BaseAgent):
                        server_name=server_name,
                        run_context=None,
                    )
                    if mcp_config.tool_filter(context, tool):
                    # Try new signature first
                    if mcp_config.tool_filter(context, tool):  # type: ignore[arg-type,call-arg]
                        filtered_tools.append(tool)
                except (TypeError, AttributeError):
                    if mcp_config.tool_filter(tool):
                    # Fallback to old signature
                    if mcp_config.tool_filter(tool):  # type: ignore[arg-type,call-arg]
                        filtered_tools.append(tool)
            else:
                # Not callable - include tool
@@ -981,7 +984,9 @@ class Agent(BaseAgent):
        path = parsed.path.replace("/", "_").strip("_")
        return f"{domain}_{path}" if path else domain

    def _get_mcp_tool_schemas(self, server_params: dict) -> dict[str, dict]:
    def _get_mcp_tool_schemas(
        self, server_params: dict[str, Any]
    ) -> dict[str, dict[str, Any]]:
        """Get tool schemas from MCP server for wrapper creation with caching."""
        server_url = server_params["url"]
@@ -995,7 +1000,7 @@ class Agent(BaseAgent):
                self._logger.log(
                    "debug", f"Using cached MCP tool schemas for {server_url}"
                )
                return cached_data
                return cast(dict[str, dict[str, Any]], cached_data)

        try:
            schemas = asyncio.run(self._get_mcp_tool_schemas_async(server_params))
@@ -1013,7 +1018,7 @@ class Agent(BaseAgent):

    async def _get_mcp_tool_schemas_async(
        self, server_params: dict[str, Any]
    ) -> dict[str, dict]:
    ) -> dict[str, dict[str, Any]]:
        """Async implementation of MCP tool schema retrieval with timeouts and retries."""
        server_url = server_params["url"]
        return await self._retry_mcp_discovery(
@@ -1021,7 +1026,7 @@ class Agent(BaseAgent):
        )

    async def _retry_mcp_discovery(
        self, operation_func, server_url: str
        self, operation_func: Any, server_url: str
    ) -> dict[str, dict[str, Any]]:
        """Retry MCP discovery operation with exponential backoff, avoiding try-except in loop."""
        last_error = None
@@ -1052,7 +1057,7 @@ class Agent(BaseAgent):

    @staticmethod
    async def _attempt_mcp_discovery(
        operation_func, server_url: str
        operation_func: Any, server_url: str
    ) -> tuple[dict[str, dict[str, Any]] | None, str, bool]:
        """Attempt single MCP discovery operation and return (result, error_message, should_retry)."""
        try:
@@ -1156,13 +1161,13 @@ class Agent(BaseAgent):
                Field(..., description=field_description),
            )
        else:
            field_definitions[field_name] = (
            field_definitions[field_name] = (  # type: ignore[assignment]
                field_type | None,
                Field(default=None, description=field_description),
            )

        model_name = f"{tool_name.replace('-', '_').replace(' ', '_')}Schema"
        return create_model(model_name, **field_definitions)
        return create_model(model_name, **field_definitions)  # type: ignore[no-any-return,call-overload]

    def _json_type_to_python(self, field_schema: dict[str, Any]) -> type:
        """Convert JSON Schema type to Python type.
@@ -1182,16 +1187,16 @@ class Agent(BaseAgent):
            if "const" in option:
                types.append(str)
            else:
                types.append(self._json_type_to_python(option))
                types.append(self._json_type_to_python(option))  # type: ignore[arg-type]
        unique_types = list(set(types))
        if len(unique_types) > 1:
            result = unique_types[0]
            for t in unique_types[1:]:
                result = result | t
                result = result | t  # type: ignore[assignment]
            return result
        return unique_types[0]

        type_mapping = {
        type_mapping: dict[str, type] = {
            "string": str,
            "number": float,
            "integer": int,
@@ -1200,10 +1205,10 @@ class Agent(BaseAgent):
            "object": dict,
        }

        return type_mapping.get(json_type, Any)
        return type_mapping.get(json_type or "", Any)

    @staticmethod
    def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict]:
    def _fetch_amp_mcp_servers(mcp_name: str) -> list[dict[str, Any]]:
        """Fetch MCP server configurations from CrewAI AMP API."""
        # TODO: Implement AMP API call to "integrations/mcps" endpoint
        # Should return list of server configs with URLs
@@ -137,7 +137,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
        default=False,
        description="Enable agent to delegate and ask questions among each other.",
    )
    tools: list[BaseTool] | None = Field(
    tools: list[BaseTool] = Field(
        default_factory=list, description="Tools at agents' disposal"
    )
    max_iter: int = Field(
@@ -161,7 +161,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
        description="An instance of the ToolsHandler class.",
    )
    tools_results: list[dict[str, Any]] = Field(
        default=[], description="Results of the tools used by the agent."
        default_factory=list, description="Results of the tools used by the agent."
    )
    max_tokens: int | None = Field(
        default=None, description="Maximum number of tokens for the agent's execution."
@@ -265,7 +265,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
        if not mcps:
            return mcps

        validated_mcps = []
        validated_mcps: list[str | MCPServerConfig] = []
        for mcp in mcps:
            if isinstance(mcp, str):
                if mcp.startswith(("https://", "crewai-amp:")):
@@ -51,7 +51,7 @@ if TYPE_CHECKING:
    from crewai.agent import Agent
    from crewai.agents.tools_handler import ToolsHandler
    from crewai.crew import Crew
    from crewai.llms.base_llm import BaseLLM
    from crewai.llm.base_llm import BaseLLM
    from crewai.task import Task
    from crewai.tools.base_tool import BaseTool
    from crewai.tools.structured_tool import CrewStructuredTool
@@ -493,206 +493,5 @@ def config_reset():
    config_command.reset_all_settings()


@crewai.group()
def env():
    """Environment variable commands."""


@env.command("view")
def env_view():
    """View tracing-related environment variables."""
    import os
    from pathlib import Path

    from rich.console import Console
    from rich.panel import Panel
    from rich.table import Table

    console = Console()

    # Check for .env file
    env_file = Path(".env")
    env_file_exists = env_file.exists()

    # Create table for environment variables
    table = Table(show_header=True, header_style="bold cyan", expand=True)
    table.add_column("Environment Variable", style="cyan", width=30)
    table.add_column("Value", style="white", width=20)
    table.add_column("Source", style="yellow", width=20)

    # Check CREWAI_TRACING_ENABLED
    crewai_tracing = os.getenv("CREWAI_TRACING_ENABLED", "")
    if crewai_tracing:
        table.add_row(
            "CREWAI_TRACING_ENABLED",
            crewai_tracing,
            "Environment/Shell",
        )
    else:
        table.add_row(
            "CREWAI_TRACING_ENABLED",
            "[dim]Not set[/dim]",
            "[dim]—[/dim]",
        )

    # Check other related env vars
    crewai_testing = os.getenv("CREWAI_TESTING", "")
    if crewai_testing:
        table.add_row("CREWAI_TESTING", crewai_testing, "Environment/Shell")

    crewai_user_id = os.getenv("CREWAI_USER_ID", "")
    if crewai_user_id:
        table.add_row("CREWAI_USER_ID", crewai_user_id, "Environment/Shell")

    crewai_org_id = os.getenv("CREWAI_ORG_ID", "")
    if crewai_org_id:
        table.add_row("CREWAI_ORG_ID", crewai_org_id, "Environment/Shell")

    # Check if .env file exists
    table.add_row(
        ".env file",
        "✅ Found" if env_file_exists else "❌ Not found",
        str(env_file.resolve()) if env_file_exists else "N/A",
    )

    panel = Panel(
        table,
        title="Tracing Environment Variables",
        border_style="blue",
        padding=(1, 2),
    )
    console.print("\n")
    console.print(panel)

    # Show helpful message
    if env_file_exists:
        console.print(
            "\n[dim]💡 Tip: To enable tracing via .env, add: CREWAI_TRACING_ENABLED=true[/dim]"
        )
    else:
        console.print(
            "\n[dim]💡 Tip: Create a .env file in your project root and add: CREWAI_TRACING_ENABLED=true[/dim]"
        )
    console.print()


@crewai.group()
def traces():
    """Trace collection management commands."""


@traces.command("enable")
def traces_enable():
    """Enable trace collection for crew/flow executions."""
    from rich.console import Console
    from rich.panel import Panel

    from crewai.events.listeners.tracing.utils import (
        _load_user_data,
        _save_user_data,
    )

    console = Console()

    # Update user data to enable traces
    user_data = _load_user_data()
    user_data["trace_consent"] = True
    user_data["first_execution_done"] = True
    _save_user_data(user_data)

    panel = Panel(
        "✅ Trace collection has been enabled!\n\n"
        "Your crew/flow executions will now send traces to CrewAI+.\n"
        "Use 'crewai traces disable' to turn off trace collection.",
        title="Traces Enabled",
        border_style="green",
        padding=(1, 2),
    )
    console.print(panel)


@traces.command("disable")
def traces_disable():
    """Disable trace collection for crew/flow executions."""
    from rich.console import Console
    from rich.panel import Panel

    from crewai.events.listeners.tracing.utils import (
        _load_user_data,
        _save_user_data,
    )

    console = Console()

    # Update user data to disable traces
    user_data = _load_user_data()
    user_data["trace_consent"] = False
    user_data["first_execution_done"] = True
    _save_user_data(user_data)

    panel = Panel(
        "❌ Trace collection has been disabled!\n\n"
        "Your crew/flow executions will no longer send traces.\n"
        "Use 'crewai traces enable' to turn trace collection back on.",
        title="Traces Disabled",
        border_style="red",
        padding=(1, 2),
    )
    console.print(panel)


@traces.command("status")
def traces_status():
    """Show current trace collection status."""
    import os

    from rich.console import Console
    from rich.panel import Panel
    from rich.table import Table

    from crewai.events.listeners.tracing.utils import (
        _load_user_data,
        is_tracing_enabled,
    )

    console = Console()
    user_data = _load_user_data()

    table = Table(show_header=False, box=None)
    table.add_column("Setting", style="cyan")
    table.add_column("Value", style="white")

    # Check environment variable
    env_enabled = os.getenv("CREWAI_TRACING_ENABLED", "false")
    table.add_row("CREWAI_TRACING_ENABLED", env_enabled)

    # Check user consent
    trace_consent = user_data.get("trace_consent")
    if trace_consent is True:
        consent_status = "✅ Enabled (user consented)"
    elif trace_consent is False:
        consent_status = "❌ Disabled (user declined)"
    else:
        consent_status = "⚪ Not set (first-time user)"
    table.add_row("User Consent", consent_status)

    # Check overall status
    if is_tracing_enabled():
        overall_status = "✅ ENABLED"
        border_style = "green"
    else:
        overall_status = "❌ DISABLED"
        border_style = "red"
    table.add_row("Overall Status", overall_status)

    panel = Panel(
        table,
        title="Trace Collection Status",
        border_style=border_style,
        padding=(1, 2),
    )
    console.print(panel)


if __name__ == "__main__":
    crewai()
@@ -1,5 +1,4 @@
import json
import os
from pathlib import Path
import platform
import re
@@ -12,10 +11,11 @@ import click
from packaging import version
import tomli

from crewai.cli.utils import load_env_vars, read_toml
from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
from crewai.crew import Crew
from crewai.llm import LLM, BaseLLM
from crewai.llm import LLM
from crewai.llm.base_llm import BaseLLM
from crewai.types.crew_chat import ChatInputField, ChatInputs
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
@@ -329,11 +329,6 @@ def load_crew_and_name() -> tuple[Crew, str]:
    # Get the current working directory
    cwd = Path.cwd()

    # Load environment variables from .env file before importing the crew module
    env_vars = load_env_vars(cwd)
    for key, value in env_vars.items():
        os.environ.setdefault(key, value)

    # Path to the pyproject.toml file
    pyproject_path = cwd / "pyproject.toml"
    if not pyproject_path.exists():
@@ -1,5 +1,3 @@
from datetime import datetime
import os
from typing import Any

from rich.console import Console
@@ -7,7 +5,6 @@ from rich.table import Table

from crewai.cli.command import BaseCommand
from crewai.cli.config import HIDDEN_SETTINGS_KEYS, READONLY_SETTINGS_KEYS, Settings
from crewai.events.listeners.tracing.utils import _load_user_data


console = Console()
@@ -42,42 +39,6 @@ class SettingsCommand(BaseCommand):

            table.add_row(field_name, display_value, description)

        # Add trace-related settings from user data
        user_data = _load_user_data()

        # CREWAI_TRACING_ENABLED environment variable
        env_tracing = os.getenv("CREWAI_TRACING_ENABLED", "")
        env_tracing_display = env_tracing if env_tracing else "Not set"
        table.add_row(
            "CREWAI_TRACING_ENABLED",
            env_tracing_display,
            "Environment variable to enable/disable tracing",
        )

        # Trace consent status
        trace_consent = user_data.get("trace_consent")
        if trace_consent is True:
            consent_display = "✅ Enabled"
        elif trace_consent is False:
            consent_display = "❌ Disabled"
        else:
            consent_display = "Not set"
        table.add_row(
            "trace_consent", consent_display, "Whether trace collection is enabled"
        )

        # First execution timestamp
        if user_data.get("first_execution_at"):
            timestamp = datetime.fromtimestamp(user_data["first_execution_at"])
            first_exec_display = timestamp.strftime("%Y-%m-%d %H:%M:%S")
        else:
            first_exec_display = "Not set"
        table.add_row(
            "first_execution_at",
            first_exec_display,
            "Timestamp of first crew/flow execution",
        )

        console.print(table)

    def set(self, key: str, value: str) -> None:
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
    "crewai[tools]==1.5.0"
    "crewai[tools]==1.4.1"
]

[project.scripts]
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
    "crewai[tools]==1.5.0"
    "crewai[tools]==1.4.1"
]

[project.scripts]
@@ -27,8 +27,6 @@ from pydantic import (
    model_validator,
)
from pydantic_core import PydanticCustomError
from rich.console import Console
from rich.panel import Panel
from typing_extensions import Self

from crewai.agent import Agent
@@ -41,8 +39,8 @@ from crewai.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
    set_tracing_enabled,
    should_enable_tracing,
    is_tracing_enabled,
    should_auto_collect_first_time_traces,
)
from crewai.events.types.crew_events import (
    CrewKickoffCompletedEvent,
@@ -58,8 +56,8 @@ from crewai.events.types.crew_events import (
from crewai.flow.flow_trackable import FlowTrackable
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import LLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
@@ -282,8 +280,8 @@ class Crew(FlowTrackable, BaseModel):
        description="Metrics for the LLM usage during all tasks execution.",
    )
    tracing: bool | None = Field(
        default=None,
        description="Whether to enable tracing for the crew. True=always enable, False=always disable, None=check environment/user settings.",
        default=False,
        description="Whether to enable tracing for the crew.",
    )

    @field_validator("id", mode="before")
@@ -313,16 +311,17 @@ class Crew(FlowTrackable, BaseModel):
    @model_validator(mode="after")
    def set_private_attrs(self) -> Crew:
        """set private attributes."""

        self._cache_handler = CacheHandler()
        event_listener = EventListener()  # type: ignore[no-untyped-call]

        # Determine and set tracing state once for this execution
        tracing_enabled = should_enable_tracing(override=self.tracing)
        set_tracing_enabled(tracing_enabled)

        # Always setup trace listener - actual execution control is via contextvar
        trace_listener = TraceCollectionListener()
        trace_listener.setup_listeners(crewai_event_bus)
        if (
            is_tracing_enabled()
            or self.tracing
            or should_auto_collect_first_time_traces()
        ):
            trace_listener = TraceCollectionListener()
            trace_listener.setup_listeners(crewai_event_bus)
        event_listener.verbose = self.verbose
        event_listener.formatter.verbose = self.verbose
        self._logger = Logger(verbose=self.verbose)
@@ -1172,10 +1171,6 @@ class Crew(FlowTrackable, BaseModel):
                total_tokens=self.token_usage.total_tokens,
            ),
        )

        # Finalization is handled by trace listener (always initialized)
        # The batch manager checks contextvar to determine if tracing is enabled

        return CrewOutput(
            raw=final_task_output.raw,
            pydantic=final_task_output.pydantic,
@@ -1656,32 +1651,3 @@ class Crew(FlowTrackable, BaseModel):
            and able_to_inject
        ):
            self.tasks[0].allow_crewai_trigger_context = True

    def _show_tracing_disabled_message(self) -> None:
        """Show a message when tracing is disabled."""
        from crewai.events.listeners.tracing.utils import has_user_declined_tracing

        console = Console()

        if has_user_declined_tracing():
            message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Crew code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
        else:
            message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Crew code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""

        panel = Panel(
            message,
            title="Tracing Status",
            border_style="blue",
            padding=(1, 2),
        )
        console.print(panel)
@@ -10,7 +10,6 @@ import atexit
from collections.abc import Callable, Generator
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
import contextvars
import threading
from typing import Any, Final, ParamSpec, TypeVar
@@ -289,9 +288,8 @@ class CrewAIEventsBus:
            if event_type is LLMStreamChunkEvent:
                self._call_handlers(source, event, level_sync)
            else:
                ctx = contextvars.copy_context()
                future = self._sync_executor.submit(
                    ctx.run, self._call_handlers, source, event, level_sync
                    self._call_handlers, source, event, level_sync
                )
                await asyncio.get_running_loop().run_in_executor(
                    None, future.result
@@ -348,9 +346,8 @@ class CrewAIEventsBus:
            if event_type is LLMStreamChunkEvent:
                self._call_handlers(source, event, sync_handlers)
            else:
                ctx = contextvars.copy_context()
                sync_future = self._sync_executor.submit(
                    ctx.run, self._call_handlers, source, event, sync_handlers
                    self._call_handlers, source, event, sync_handlers
                )
                if not async_handlers:
                    return sync_future
@@ -89,7 +89,7 @@ from crewai.events.types.tool_usage_events import (
    ToolUsageStartedEvent,
)
from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.llm import LLM
from crewai.llm.core import LLM
from crewai.task import Task
from crewai.telemetry.telemetry import Telemetry
from crewai.utilities import Logger
@@ -1,4 +1,5 @@
import logging
from pathlib import Path
import uuid
import webbrowser
@@ -16,6 +17,47 @@ from crewai.events.listeners.tracing.utils import (

logger = logging.getLogger(__name__)


def _update_or_create_env_file():
    """Update or create .env file with CREWAI_TRACING_ENABLED=true."""
    env_path = Path(".env")
    env_content = ""
    variable_name = "CREWAI_TRACING_ENABLED"
    variable_value = "true"

    # Read existing content if file exists
    if env_path.exists():
        with open(env_path, "r") as f:
            env_content = f.read()

    # Check if CREWAI_TRACING_ENABLED is already set
    lines = env_content.splitlines()
    variable_exists = False
    updated_lines = []

    for line in lines:
        if line.strip().startswith(f"{variable_name}="):
            # Update existing variable
            updated_lines.append(f"{variable_name}={variable_value}")
            variable_exists = True
        else:
            updated_lines.append(line)

    # Add variable if it doesn't exist
    if not variable_exists:
        if updated_lines and not updated_lines[-1].strip():
            # If last line is empty, replace it
            updated_lines[-1] = f"{variable_name}={variable_value}"
        else:
            # Add new line and then the variable
            updated_lines.append(f"{variable_name}={variable_value}")

    # Write updated content
    with open(env_path, "w") as f:
        f.write("\n".join(updated_lines))
        if updated_lines:  # Add final newline if there's content
            f.write("\n")


class FirstTimeTraceHandler:
    """Handles the first-time user trace collection and display flow."""
@@ -54,16 +96,20 @@ class FirstTimeTraceHandler:
            if user_wants_traces:
                self._initialize_backend_and_send_events()

                # Enable tracing for future runs by updating .env file
                try:
                    _update_or_create_env_file()
                except Exception:  # noqa: S110
                    pass

                if self.ephemeral_url:
                    self._display_ephemeral_trace_link()
            else:
                self._show_tracing_declined_message()

            mark_first_execution_completed(user_consented=user_wants_traces)
            mark_first_execution_completed()

        except Exception as e:
            self._gracefully_fail(f"Error in trace handling: {e}")
            mark_first_execution_completed(user_consented=False)
            mark_first_execution_completed()

    def _initialize_backend_and_send_events(self):
        """Initialize backend batch and send collected events."""
@@ -136,13 +182,8 @@ This trace shows:
• Tool usage and results
• LLM calls and responses

✅ Tracing has been enabled for future runs!
Your preference has been saved. Future Crew/Flow executions will automatically collect traces.

To disable tracing later, do any one of these:
• Set tracing=False in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=false in your project's .env file
• Run: crewai traces disable
✅ Tracing has been enabled for future runs! (CREWAI_TRACING_ENABLED=true added to .env)
You can also add tracing=True to your Crew(tracing=True) / Flow(tracing=True) for more control.

📝 Note: This link will expire in 24 hours.
""".strip()
@@ -158,32 +199,6 @@ To disable tracing later, do any one of these:
        console.print(panel)
        console.print()

    def _show_tracing_declined_message(self):
        """Show message when user declines tracing."""
        console = Console()

        panel_content = """
Info: Tracing has been disabled.

Your preference has been saved. Future Crew/Flow executions will not collect traces.

To enable tracing later, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable
""".strip()

        panel = Panel(
            panel_content,
            title="Tracing Preference Saved",
            border_style="blue",
            padding=(1, 2),
        )

        console.print("\n")
        console.print(panel)
        console.print()

    def _gracefully_fail(self, error_message: str):
        """Handle errors gracefully without disrupting user experience."""
        console = Console()
@@ -203,14 +218,8 @@ Unfortunately, we couldn't upload them to the server right now, but here's what
• Execution duration: {self.batch_manager.calculate_duration("execution")}ms
• Batch ID: {self.batch_manager.trace_batch_id}

✅ Tracing has been enabled for future runs!
Your preference has been saved. Future Crew/Flow executions will automatically collect traces.
Tracing has been enabled for future runs! (CREWAI_TRACING_ENABLED=true added to .env)
The traces include agent decisions, task execution, and tool usage.

To disable tracing later, do any one of these:
• Set tracing=False in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=false in your project's .env file
• Run: crewai traces disable
""".strip()

        panel = Panel(
@@ -12,10 +12,7 @@ from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.plus_api import PlusAPI
from crewai.cli.version import get_crewai_version
from crewai.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.utils import (
    is_tracing_enabled_in_context,
    should_auto_collect_first_time_traces,
)
from crewai.events.listeners.tracing.utils import should_auto_collect_first_time_traces
from crewai.utilities.constants import CREWAI_BASE_URL
@@ -110,9 +107,6 @@ class TraceBatchManager:
    ):
        """Send batch initialization to backend"""

        if not is_tracing_enabled_in_context():
            return

        if not self.plus_api or not self.current_batch:
            return
@@ -249,8 +243,7 @@ class TraceBatchManager:

    def finalize_batch(self) -> TraceBatch | None:
        """Finalize batch and return it for sending"""

        if not self.current_batch or not is_tracing_enabled_in_context():
        if not self.current_batch:
            return None

        all_handlers_completed = self.wait_for_pending_events()
@@ -10,14 +10,13 @@ from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.version import get_crewai_version
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.event_bus import CrewAIEventsBus
from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.events.listeners.tracing.first_time_trace_handler import (
    FirstTimeTraceHandler,
)
from crewai.events.listeners.tracing.trace_batch_manager import TraceBatchManager
from crewai.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.utils import (
    safe_serialize_to_dict,
)
from crewai.events.listeners.tracing.utils import safe_serialize_to_dict
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
@@ -81,7 +80,6 @@ from crewai.events.types.tool_usage_events import (
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)
from crewai.events.utils.console_formatter import ConsoleFormatter


class TraceCollectionListener(BaseEventListener):
@@ -629,35 +627,3 @@ class TraceCollectionListener(BaseEventListener):
            "event": safe_serialize_to_dict(event),
            "source": source,
        }

    def _show_tracing_disabled_message(self) -> None:
        """Show a message when tracing is disabled."""
        from rich.console import Console
        from rich.panel import Panel

        from crewai.events.listeners.tracing.utils import has_user_declined_tracing

        console = Console()

        if has_user_declined_tracing():
            message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
        else:
            message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""

        panel = Panel(
            message,
            title="Tracing Status",
            border_style="blue",
            padding=(1, 2),
        )
        console.print(panel)
@@ -1,4 +1,3 @@
from contextvars import ContextVar, Token
from datetime import datetime
import getpass
import hashlib
@@ -9,7 +8,7 @@ from pathlib import Path
import platform
import re
import subprocess
from typing import Any, cast
from typing import Any
import uuid

import click
@@ -24,120 +23,7 @@ from crewai.utilities.serialization import to_serializable

logger = logging.getLogger(__name__)


_tracing_enabled: ContextVar[bool | None] = ContextVar("_tracing_enabled", default=None)


def should_enable_tracing(*, override: bool | None = None) -> bool:
    """Determine if tracing should be enabled.

    This is the single source of truth for tracing enablement.
    Priority order:
    1. Explicit override (e.g., Crew.tracing=True/False)
    2. Environment variable CREWAI_TRACING_ENABLED
    3. User consent from user_data

    Args:
        override: Explicit override for tracing (True=always enable, False=always disable, None=check other settings)

    Returns:
        True if tracing should be enabled, False otherwise.
    """
    if override is True:
        return True
    if override is False:
        return False

    env_value = os.getenv("CREWAI_TRACING_ENABLED", "").lower()
    if env_value in ("true", "1"):
        return True

    data = _load_user_data()

    if data.get("trace_consent", False) is not False:
        return True

    return False


def set_tracing_enabled(enabled: bool) -> object:
    """Set tracing enabled state for current execution context.

    Args:
        enabled: Whether tracing should be enabled

    Returns:
        A token that can be used with reset_tracing_enabled to restore previous value.
    """
    return _tracing_enabled.set(enabled)


def reset_tracing_enabled(token: Token[bool | None]) -> None:
    """Reset tracing enabled state to previous value.

    Args:
        token: Token returned from set_tracing_enabled
    """
    _tracing_enabled.reset(token)


def is_tracing_enabled_in_context() -> bool:
    """Check if tracing is enabled in current execution context.

    Returns:
        True if tracing is enabled in context, False otherwise.
        Returns False if context has not been set.
    """
    enabled = _tracing_enabled.get()
    return enabled if enabled is not None else False


def _user_data_file() -> Path:
    base = Path(db_storage_path())
    base.mkdir(parents=True, exist_ok=True)
    return base / ".crewai_user.json"


def _load_user_data() -> dict[str, Any]:
    p = _user_data_file()
    if p.exists():
        try:
            return cast(dict[str, Any], json.loads(p.read_text()))
        except (json.JSONDecodeError, OSError, PermissionError) as e:
            logger.warning(f"Failed to load user data: {e}")
    return {}


def _save_user_data(data: dict[str, Any]) -> None:
    try:
        p = _user_data_file()
        p.write_text(json.dumps(data, indent=2))
    except (OSError, PermissionError) as e:
        logger.warning(f"Failed to save user data: {e}")


def has_user_declined_tracing() -> bool:
    """Check if user has explicitly declined trace collection.

    Returns:
        True if user previously declined tracing, False otherwise.
    """
    data = _load_user_data()
    if data.get("first_execution_done", False):
        return data.get("trace_consent", False) is False
    return False


def is_tracing_enabled() -> bool:
    """Check if tracing should be enabled.

    Returns:
        True if tracing is enabled and not disabled, False otherwise.
    """
    # If user has explicitly declined tracing, never enable it
    if has_user_declined_tracing():
        return False

    return os.getenv("CREWAI_TRACING_ENABLED", "false").lower() == "true"
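For orientation, the ContextVar helpers in the hunk above (which this comparison removes) follow the standard set/reset token pattern. A minimal usage sketch, assuming the imports resolve against the pre-change module shown here:

```python
from crewai.events.listeners.tracing.utils import (
    is_tracing_enabled_in_context,
    reset_tracing_enabled,
    set_tracing_enabled,
)

token = set_tracing_enabled(True)  # flip tracing on for this execution context
try:
    assert is_tracing_enabled_in_context()
    # ... run a crew/flow; trace listeners consult the contextvar ...
finally:
    reset_tracing_enabled(token)  # restore whatever value was set before
```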
@@ -327,12 +213,36 @@ def _get_generic_system_id() -> str | None:
    return None


def _user_data_file() -> Path:
    base = Path(db_storage_path())
    base.mkdir(parents=True, exist_ok=True)
    return base / ".crewai_user.json"


def _load_user_data() -> dict:
    p = _user_data_file()
    if p.exists():
        try:
            return json.loads(p.read_text())
        except (json.JSONDecodeError, OSError, PermissionError) as e:
            logger.warning(f"Failed to load user data: {e}")
    return {}


def _save_user_data(data: dict) -> None:
    try:
        p = _user_data_file()
        p.write_text(json.dumps(data, indent=2))
    except (OSError, PermissionError) as e:
        logger.warning(f"Failed to save user data: {e}")


def get_user_id() -> str:
    """Stable, anonymized user identifier with caching."""
    data = _load_user_data()

    if "user_id" in data:
        return cast(str, data["user_id"])
        return data["user_id"]

    try:
        username = getpass.getuser()
@@ -353,12 +263,8 @@ def is_first_execution() -> bool:
    return not data.get("first_execution_done", False)


def mark_first_execution_done(user_consented: bool = False) -> None:
    """Mark that the first execution has been completed.

    Args:
        user_consented: Whether the user consented to trace collection.
    """
def mark_first_execution_done() -> None:
    """Mark that the first execution has been completed."""
    data = _load_user_data()
    if data.get("first_execution_done", False):
        return
@@ -369,13 +275,12 @@ def mark_first_execution_done(user_consented: bool = False) -> None:
            "first_execution_at": datetime.now().timestamp(),
            "user_id": get_user_id(),
            "machine_id": _get_machine_id(),
            "trace_consent": user_consented,
        }
    )
    _save_user_data(data)


def safe_serialize_to_dict(obj: Any, exclude: set[str] | None = None) -> dict[str, Any]:
def safe_serialize_to_dict(obj, exclude: set[str] | None = None) -> dict[str, Any]:
    """Safely serialize an object to a dictionary for event data."""
    try:
        serialized = to_serializable(obj, exclude)
@@ -386,9 +291,7 @@ def safe_serialize_to_dict(obj: Any, exclude: set[str] | None = None) -> dict[st
        return {"serialization_error": str(e), "object_type": type(obj).__name__}


def truncate_messages(
    messages: list[dict[str, Any]], max_content_length: int = 500, max_messages: int = 5
) -> list[dict[str, Any]]:
def truncate_messages(messages, max_content_length=500, max_messages=5):
    """Truncate message content and limit number of messages"""
    if not messages or not isinstance(messages, list):
        return messages
@@ -405,22 +308,9 @@ def truncate_messages(


def should_auto_collect_first_time_traces() -> bool:
    """True if we should auto-collect traces for first-time user.


    Returns:
        True if first-time user AND telemetry not disabled AND tracing not explicitly enabled, False otherwise.
    """
    """True if we should auto-collect traces for first-time user."""
    if _is_test_environment():
        return False

    # If user has previously declined, never auto-collect
    if has_user_declined_tracing():
        return False

    if is_tracing_enabled_in_context():
        return False

    return is_first_execution()
@@ -465,7 +355,7 @@ def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:

    result = [False]

    def get_input() -> None:
    def get_input():
        try:
            response = input().strip().lower()
            result[0] = response in ["y", "yes"]
@@ -487,10 +377,6 @@ def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:
    return False


def mark_first_execution_completed(user_consented: bool = False) -> None:
    """Mark first execution as completed (called after trace prompt).

    Args:
        user_consented: Whether the user consented to trace collection.
    """
    mark_first_execution_done(user_consented=user_consented)
def mark_first_execution_completed() -> None:
    """Mark first execution as completed (called after trace prompt)."""
    mark_first_execution_done()
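As an editorial aside: reading the helpers in this file together, the persisted `.crewai_user.json` (stored under `db_storage_path()`) plausibly ends up with a shape like the sketch below. The keys come from the code in this diff; the values are purely illustrative.

```python
# Illustrative only - not a dump of a real file.
example_user_data = {
    "first_execution_done": True,
    "first_execution_at": 1735689600.0,  # datetime.now().timestamp()
    "user_id": "a3f9...",                # anonymized identifier from get_user_id()
    "machine_id": "9c1b...",             # from _get_machine_id()
    "trace_consent": True,               # written by `crewai traces enable/disable`
}
```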
@@ -1,4 +1,3 @@
import threading
from typing import Any, ClassVar

from rich.console import Console
@@ -28,7 +27,6 @@ class ConsoleFormatter:
    _pending_a2a_turn_number: int | None = None
    _a2a_turn_branches: ClassVar[dict[int, Tree]] = {}
    _current_a2a_agent_name: str | None = None
    crew_completion_printed: ClassVar[threading.Event] = threading.Event()

    def __init__(self, verbose: bool = False):
        self.console = Console(width=None)
@@ -49,44 +47,13 @@ class ConsoleFormatter:
            padding=(1, 2),
        )

    def _show_tracing_disabled_message_if_needed(self) -> None:
        """Show tracing disabled message if tracing is not enabled."""
        from crewai.events.listeners.tracing.utils import (
            has_user_declined_tracing,
            is_tracing_enabled_in_context,
        )

        if not is_tracing_enabled_in_context():
            if has_user_declined_tracing():
                message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
            else:
                message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""

            panel = Panel(
                message,
                title="Tracing Status",
                border_style="blue",
                padding=(1, 2),
            )
            self.console.print(panel)

    def create_status_content(
        self,
        title: str,
        name: str,
        status_style: str = "blue",
        tool_args: dict[str, Any] | str = "",
        **fields: Any,
        **fields,
    ) -> Text:
        """Create standardized status content with consistent formatting."""
        content = Text()
@@ -125,7 +92,7 @@ To enable tracing, do any one of these:
        """Add a node to the tree with consistent styling."""
        return parent.add(Text(text, style=style))

    def print(self, *args: Any, **kwargs: Any) -> None:
    def print(self, *args, **kwargs) -> None:
        """Custom print that replaces consecutive Tree renders.

        * If the argument is a single ``Tree`` instance, we either start a
@@ -241,20 +208,11 @@ To enable tracing, do any one of these:

        self.print_panel(content, title, style)

        if status in ["completed", "failed"]:
            self.crew_completion_printed.set()

            # Show tracing disabled message after crew completion
            self._show_tracing_disabled_message_if_needed()

    def create_crew_tree(self, crew_name: str, source_id: str) -> Tree | None:
        """Create and initialize a new crew tree with initial status."""
        if not self.verbose:
            return None

        # Reset the crew completion event for this new crew execution
        ConsoleFormatter.crew_completion_printed.clear()

        tree = Tree(
            Text("🚀 Crew: ", style="cyan bold") + Text(crew_name, style="cyan")
        )
@@ -539,7 +497,7 @@ To enable tracing, do any one of these:

        return method_branch

    def get_llm_tree(self, tool_name: str) -> Tree:
    def get_llm_tree(self, tool_name: str):
        text = Text()
        text.append(f"🔧 Using {tool_name} from LLM available_function", style="yellow")
@@ -554,7 +512,7 @@ To enable tracing, do any one of these:
        self,
        tool_name: str,
        tool_args: dict[str, Any] | str,
    ) -> None:
    ):
        # Create status content for the tool usage
        content = self.create_status_content(
            "Tool Usage Started", tool_name, Status="In Progress", tool_args=tool_args
@@ -570,7 +528,7 @@ To enable tracing, do any one of these:
    def handle_llm_tool_usage_finished(
        self,
        tool_name: str,
    ) -> None:
    ):
        tree = self.get_llm_tree(tool_name)
        self.add_tree_node(tree, "✅ Tool Usage Completed", "green")
        self.print(tree)
@@ -580,7 +538,7 @@ To enable tracing, do any one of these:
        self,
        tool_name: str,
        error: str,
    ) -> None:
    ):
        tree = self.get_llm_tree(tool_name)
        self.add_tree_node(tree, "❌ Tool Usage Failed", "red")
        self.print(tree)
@@ -1600,7 +1558,7 @@ To enable tracing, do any one of these:
        if branch_to_use is None and tree_to_use is not None:
            branch_to_use = tree_to_use

        def add_panel() -> None:
        def add_panel():
            memory_text = str(memory_content)
            if len(memory_text) > 500:
                memory_text = memory_text[:497] + "..."
@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field

from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.task import Task
from crewai.utilities.llm_utils import create_llm
@@ -26,17 +26,14 @@ from uuid import uuid4
from opentelemetry import baggage
from opentelemetry.context import attach, detach
from pydantic import BaseModel, Field, ValidationError
from rich.console import Console
from rich.panel import Panel

from crewai.events.event_bus import crewai_event_bus
from crewai.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
    has_user_declined_tracing,
    set_tracing_enabled,
    should_enable_tracing,
    is_tracing_enabled,
    should_auto_collect_first_time_traces,
)
from crewai.events.types.flow_events import (
    FlowCreatedEvent,
@@ -455,7 +452,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
    _router_paths: ClassVar[dict[FlowMethodName, list[FlowMethodName]]] = {}
    initial_state: type[T] | T | None = None
    name: str | None = None
    tracing: bool | None = None
    tracing: bool | None = False

    def __class_getitem__(cls: type[Flow[T]], item: type[T]) -> type[Flow[T]]:
        class _FlowGeneric(cls):  # type: ignore
@@ -467,14 +464,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
    def __init__(
        self,
        persistence: FlowPersistence | None = None,
        tracing: bool | None = None,
        tracing: bool | None = False,
        **kwargs: Any,
    ) -> None:
        """Initialize a new Flow instance.

        Args:
            persistence: Optional persistence backend for storing flow states
            tracing: Whether to enable tracing. True=always enable, False=always disable, None=check environment/user settings
            **kwargs: Additional state values to initialize or override
        """
        # Initialize basic instance attributes
@@ -492,11 +488,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
        # Initialize state with initial values
        self._state = self._create_initial_state()
        self.tracing = tracing
        tracing_enabled = should_enable_tracing(override=self.tracing)
        set_tracing_enabled(tracing_enabled)

        trace_listener = TraceCollectionListener()
        trace_listener.setup_listeners(crewai_event_bus)
        if (
            is_tracing_enabled()
            or self.tracing
            or should_auto_collect_first_time_traces()
        ):
            trace_listener = TraceCollectionListener()
            trace_listener.setup_listeners(crewai_event_bus)
        # Apply any additional kwargs
        if kwargs:
            self._initialize_state(kwargs)
@@ -938,13 +936,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
                )
            self._event_futures.clear()

            trace_listener = TraceCollectionListener()
            if trace_listener.batch_manager.batch_owner_type == "flow":
                if trace_listener.first_time_handler.is_first_time:
                    trace_listener.first_time_handler.mark_events_collected()
                    trace_listener.first_time_handler.handle_execution_completion()
                else:
                    trace_listener.batch_manager.finalize_batch()
            if (
                is_tracing_enabled()
                or self.tracing
                or should_auto_collect_first_time_traces()
            ):
                trace_listener = TraceCollectionListener()
                if trace_listener.batch_manager.batch_owner_type == "flow":
                    if trace_listener.first_time_handler.is_first_time:
                        trace_listener.first_time_handler.mark_events_collected()
                        trace_listener.first_time_handler.handle_execution_completion()
                    else:
                        trace_listener.batch_manager.finalize_batch()

            return final_output
        finally:
@@ -1378,32 +1381,3 @@ class Flow(Generic[T], metaclass=FlowMeta):
        )
        structure = build_flow_structure(self)
        return render_interactive(structure, filename=filename, show=show)

    @staticmethod
    def _show_tracing_disabled_message() -> None:
        """Show a message when tracing is disabled."""

        console = Console()

        if has_user_declined_tracing():
            message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
        else:
            message = """Info: Tracing is disabled.

To enable tracing, do any one of these:
• Set tracing=True in your Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""

        panel = Panel(
            message,
            title="Tracing Status",
            border_style="blue",
            padding=(1, 2),
        )
        console.print(panel)
@@ -39,8 +39,8 @@ from crewai.events.types.agent_events import (
from crewai.events.types.logging_events import AgentLogsExecutionEvent
from crewai.flow.flow_trackable import FlowTrackable
from crewai.lite_agent_output import LiteAgentOutput
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import LLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities.agent_utils import (
@@ -504,7 +504,7 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
AgentFinish: The final result of the agent execution.
|
||||
"""
|
||||
# Execute the agent loop
|
||||
formatted_answer = None
|
||||
formatted_answer: AgentAction | AgentFinish | None = None
|
||||
while not isinstance(formatted_answer, AgentFinish):
|
||||
try:
|
||||
if has_reached_max_iterations(self._iterations, self.max_iterations):
|
||||
@@ -553,7 +553,8 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
show_logs=self._show_logs,
|
||||
)
|
||||
|
||||
self._append_message(formatted_answer.text, role="assistant")
|
||||
if formatted_answer is not None:
|
||||
self._append_message(formatted_answer.text, role="assistant")
|
||||
except OutputParserError as e: # noqa: PERF203
|
||||
self._printer.print(
|
||||
content="Failed to parse LLM output. Retrying...",
|
||||
|
||||
lib/crewai/src/crewai/llm/__init__.py (new file, 4 lines)

```python
@@ -0,0 +1,4 @@
from crewai.llm.core import LLM


__all__ = ["LLM"]
```
lib/crewai/src/crewai/llm/base_llm.py (new file, 588 lines)

```python
@@ -0,0 +1,588 @@
"""Base LLM abstract class for CrewAI.

This module provides the abstract base class for all LLM implementations
in CrewAI, including common functionality for native SDK implementations.
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
import os
import re
from typing import TYPE_CHECKING, Any, Final

from dotenv import load_dotenv
import httpx
from pydantic import BaseModel, Field, field_validator

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import (
    LLMCallCompletedEvent,
    LLMCallFailedEvent,
    LLMCallStartedEvent,
    LLMCallType,
    LLMStreamChunkEvent,
)
from crewai.events.types.tool_usage_events import (
    ToolUsageErrorEvent,
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)
from crewai.llm.hooks.base import BaseInterceptor
from crewai.llm.internal.meta import LLMMeta
from crewai.types.usage_metrics import UsageMetrics


if TYPE_CHECKING:
    from crewai.agent.core import Agent
    from crewai.task import Task
    from crewai.tools.base_tool import BaseTool
    from crewai.utilities.types import LLMMessage


load_dotenv()

DEFAULT_CONTEXT_WINDOW_SIZE: Final[int] = 4096
DEFAULT_SUPPORTS_STOP_WORDS: Final[bool] = True
_JSON_EXTRACTION_PATTERN: Final[re.Pattern[str]] = re.compile(r"\{.*}", re.DOTALL)


class BaseLLM(BaseModel, ABC, metaclass=LLMMeta):
    """Abstract base class for LLM implementations.

    This class defines the interface that all LLM implementations must follow.
    Users can extend this class to create custom LLM implementations that don't
    rely on litellm's authentication mechanism.

    Custom LLM implementations should handle error cases gracefully, including
    timeouts, authentication failures, and malformed responses. They should also
    implement proper validation for input parameters and provide clear error
    messages when things go wrong.

    Attributes:
        model: The model identifier/name.
        temperature: Optional temperature setting for response generation.
        stop: A list of stop sequences that the LLM should use to stop generation.
    """

    # Core fields
    model: str = Field(..., description="The model identifier/name")
    temperature: float | None = Field(
        None, description="Temperature setting for response generation"
    )
    api_key: str | None = Field(None, description="API key for authentication")
    base_url: str | None = Field(None, description="Base URL for API requests")
    provider: str = Field(
        default="openai", description="Provider name (openai, anthropic, etc.)"
    )
    stop: list[str] = Field(
        default_factory=list,
        description="Stop sequences for generation",
        alias="stop_sequences",
    )

    # Internal fields
    is_litellm: bool = Field(
        default=False, description="Whether this instance uses LiteLLM"
    )
    interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = Field(
        default=None, description="HTTP request/response interceptor"
    )
    _token_usage: dict[str, int] = {
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "successful_requests": 0,
        "cached_prompt_tokens": 0,
    }

    @field_validator("api_key", mode="after")
    @classmethod
    def _validate_api_key(cls, value: str | None) -> str | None:
        """Validate API key for authentication.

        Args:
            value: API key value or None

        Returns:
            API key from environment if not provided, or the original value
        """
        if value is None:
            cls_name = cls.__name__
            provider_prefix = cls_name.replace("Completion", "").upper()
            env_var = f"{provider_prefix}_API_KEY"
            value = os.getenv(env_var)
        return value

    @field_validator("stop", mode="before")
    @classmethod
    def _normalize_stop(cls, value: Any) -> list[str]:
        """Normalize stop sequences to a list.

        Args:
            value: Stop sequences as string, list, or None

        Returns:
            Normalized list of stop sequences
        """
        if value is None:
            return []
        if isinstance(value, str):
            return [value]
        if isinstance(value, list):
            return value
        return []

    @property
    def additional_params(self) -> dict[str, Any]:
        """Get additional parameters stored as extra fields.

        Returns:
            Dictionary of additional parameters
        """
        return self.__pydantic_extra__ or {}

    @additional_params.setter
    def additional_params(self, value: dict[str, Any]) -> None:
        """Set additional parameters as extra fields.

        Args:
            value: Dictionary of additional parameters to set
        """
        if not isinstance(value, dict):
            raise ValueError("additional_params must be a dictionary")
        if self.__pydantic_extra__ is None:
            self.__pydantic_extra__ = {}
        self.__pydantic_extra__.update(value)

    @abstractmethod
    def call(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call the LLM with the given messages.

        Args:
            messages: Input messages for the LLM.
                Can be a string or list of message dictionaries.
                If string, it will be converted to a single user message.
                If list, each dict must have 'role' and 'content' keys.
            tools: Optional list of tool schemas for function calling.
                Each tool should define its name, description, and parameters.
            callbacks: Optional list of callback functions to be executed
                during and after the LLM call.
            available_functions: Optional dict mapping function names to callables
                that can be invoked by the LLM.
            from_task: Optional task caller to be used for the LLM call.
            from_agent: Optional agent caller to be used for the LLM call.
            response_model: Optional response model to be used for the LLM call.

        Returns:
            Either a text response from the LLM (str) or
            the result of a tool function call (Any).

        Raises:
            ValueError: If the messages format is invalid.
            TimeoutError: If the LLM request times out.
            RuntimeError: If the LLM request fails for other reasons.
        """

    def _convert_tools_for_interference(
        self, tools: list[dict[str, BaseTool]]
    ) -> list[dict[str, BaseTool]]:
        """Convert tools to a format that can be used for interference.

        Args:
            tools: List of tools to convert.

        Returns:
            List of converted tools (default implementation returns as-is)
        """
        return tools

    def supports_stop_words(self) -> bool:
        """Check if the LLM supports stop words.

        Returns:
            True if the LLM supports stop words, False otherwise.
        """
        return DEFAULT_SUPPORTS_STOP_WORDS

    def _supports_stop_words_implementation(self) -> bool:
        """Check if stop words are configured for this LLM instance.

        Native providers can override supports_stop_words() to return this value
        to ensure consistent behavior based on whether stop words are actually configured.

        Returns:
            True if stop words are configured and can be applied
        """
        return bool(self.stop)

    def _apply_stop_words(self, content: str) -> str:
        """Apply stop words to truncate response content.

        This method provides consistent stop word behavior across all native SDK providers.
        Native providers should call this method to post-process their responses.

        Args:
            content: The raw response content from the LLM

        Returns:
            Content truncated at the first occurrence of any stop word

        Example:
            >>> llm = MyNativeLLM(stop=["Observation:", "Final Answer:"])
            >>> response = (
            ...     "I need to search.\\n\\nAction: search\\nObservation: Found results"
            ... )
            >>> llm._apply_stop_words(response)
            "I need to search.\\n\\nAction: search"
        """
        if not self.stop or not content:
            return content

        # Find the earliest occurrence of any stop word
        earliest_stop_pos = len(content)
        found_stop_word = None

        for stop_word in self.stop:
            stop_pos = content.find(stop_word)
            if stop_pos != -1 and stop_pos < earliest_stop_pos:
                earliest_stop_pos = stop_pos
                found_stop_word = stop_word

        # Truncate at the stop word if found
        if found_stop_word is not None:
            truncated = content[:earliest_stop_pos].strip()
            logging.debug(
                f"Applied stop word '{found_stop_word}' at position {earliest_stop_pos}"
            )
            return truncated

        return content

    def get_context_window_size(self) -> int:
        """Get the context window size for the LLM.

        Returns:
            The number of tokens/characters the model can handle.
        """
        # Default implementation - subclasses should override with model-specific values
        return DEFAULT_CONTEXT_WINDOW_SIZE

    # Common helper methods for native SDK implementations

    def _emit_call_started_event(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> None:
        """Emit LLM call started event."""
        if not hasattr(crewai_event_bus, "emit"):
            raise ValueError("crewai_event_bus does not have an emit method") from None

        crewai_event_bus.emit(
            self,
            event=LLMCallStartedEvent(
                messages=messages,
                tools=tools,
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
                from_agent=from_agent,
                model=self.model,
            ),
        )

    def _emit_call_completed_event(
        self,
        response: Any,
        call_type: LLMCallType,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        messages: str | list[dict[str, Any]] | None = None,
    ) -> None:
        """Emit LLM call completed event."""
        crewai_event_bus.emit(
            self,
            event=LLMCallCompletedEvent(
                messages=messages,
                response=response,
                call_type=call_type,
                from_task=from_task,
                from_agent=from_agent,
                model=self.model,
            ),
        )

    def _emit_call_failed_event(
        self,
        error: str,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> None:
        """Emit LLM call failed event."""
        if not hasattr(crewai_event_bus, "emit"):
            raise ValueError("crewai_event_bus does not have an emit method") from None

        crewai_event_bus.emit(
            self,
            event=LLMCallFailedEvent(
                error=error,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

    def _emit_stream_chunk_event(
        self,
        chunk: str,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        tool_call: dict[str, Any] | None = None,
    ) -> None:
        """Emit stream chunk event."""
        if not hasattr(crewai_event_bus, "emit"):
            raise ValueError("crewai_event_bus does not have an emit method") from None

        crewai_event_bus.emit(
            self,
            event=LLMStreamChunkEvent(
                chunk=chunk,
                tool_call=tool_call,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

    def _handle_tool_execution(
        self,
        function_name: str,
        function_args: dict[str, Any],
        available_functions: dict[str, Any],
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> str | None:
        """Handle tool execution with proper event emission.

        Args:
            function_name: Name of the function to execute
            function_args: Arguments to pass to the function
            available_functions: Dict of available functions
            from_task: Optional task object
            from_agent: Optional agent object

        Returns:
            Result of function execution or None if function not found
        """
        if function_name not in available_functions:
            logging.warning(
                f"Function '{function_name}' not found in available functions"
            )
            return None

        try:
            # Emit tool usage started event
            started_at = datetime.now()

            crewai_event_bus.emit(
                self,
                event=ToolUsageStartedEvent(
                    tool_name=function_name,
                    tool_args=function_args,
                    from_agent=from_agent,
                    from_task=from_task,
                ),
            )

            # Execute the function
            fn = available_functions[function_name]
            result = fn(**function_args)

            # Emit tool usage finished event
            crewai_event_bus.emit(
                self,
                event=ToolUsageFinishedEvent(
                    output=result,
                    tool_name=function_name,
                    tool_args=function_args,
                    started_at=started_at,
                    finished_at=datetime.now(),
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

            # Emit LLM call completed event for tool call
            self._emit_call_completed_event(
                response=result,
                call_type=LLMCallType.TOOL_CALL,
                from_task=from_task,
                from_agent=from_agent,
            )

            return str(result)

        except Exception as e:
            error_msg = f"Error executing function '{function_name}': {e!s}"
            logging.error(error_msg)

            # Emit tool usage error event
            if not hasattr(crewai_event_bus, "emit"):
                raise ValueError(
                    "crewai_event_bus does not have an emit method"
                ) from None

            crewai_event_bus.emit(
                self,
                event=ToolUsageErrorEvent(
                    tool_name=function_name,
                    tool_args=function_args,
                    error=error_msg,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

            # Emit LLM call failed event
            self._emit_call_failed_event(
                error=error_msg,
                from_task=from_task,
                from_agent=from_agent,
            )

            return None

    def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
        """Convert messages to standard format.

        Args:
            messages: Input messages (string or list of message dicts)

        Returns:
            List of message dictionaries with 'role' and 'content' keys

        Raises:
            ValueError: If message format is invalid
        """
        if isinstance(messages, str):
            return [{"role": "user", "content": messages}]

        # Validate message format
        for i, msg in enumerate(messages):
            if not isinstance(msg, dict):
                raise ValueError(f"Message at index {i} must be a dictionary")
            if "role" not in msg or "content" not in msg:
                raise ValueError(
                    f"Message at index {i} must have 'role' and 'content' keys"
                )

        return messages

    @staticmethod
    def _validate_structured_output(
        response: str,
        response_format: type[BaseModel] | None,
    ) -> str | BaseModel:
        """Validate and parse structured output.

        Args:
            response: Raw response string
            response_format: Optional Pydantic model for structured output

        Returns:
            Parsed response (BaseModel instance if response_format provided, otherwise string)

        Raises:
            ValueError: If structured output validation fails
        """
        if response_format is None:
            return response

        try:
            # Try to parse as JSON first
            if response.strip().startswith("{") or response.strip().startswith("["):
                data = json.loads(response)
                return response_format.model_validate(data)

            json_match = _JSON_EXTRACTION_PATTERN.search(response)
            if json_match:
                data = json.loads(json_match.group())
                return response_format.model_validate(data)

            raise ValueError("No JSON found in response")

        except (json.JSONDecodeError, ValueError) as e:
            logging.warning(f"Failed to parse structured output: {e}")
            raise ValueError(
                f"Failed to parse response into {response_format.__name__}: {e}"
            ) from e

    @staticmethod
    def _extract_provider(model: str) -> str:
        """Extract provider from model string.

        Args:
            model: Model string (e.g., 'openai/gpt-4' or 'gpt-4')

        Returns:
            Provider name (e.g., 'openai')
        """
        if "/" in model:
            return model.partition("/")[0]
        return "openai"  # Default provider

    def _track_token_usage_internal(self, usage_data: dict[str, Any]) -> None:
        """Track token usage internally in the LLM instance.

        Args:
            usage_data: Token usage data from the API response
        """
        # Extract tokens in a provider-agnostic way
        prompt_tokens = (
            usage_data.get("prompt_tokens")
            or usage_data.get("prompt_token_count")
            or usage_data.get("input_tokens")
            or 0
        )

        completion_tokens = (
            usage_data.get("completion_tokens")
            or usage_data.get("candidates_token_count")
            or usage_data.get("output_tokens")
            or 0
        )

        cached_tokens = (
            usage_data.get("cached_tokens")
            or usage_data.get("cached_prompt_tokens")
            or 0
        )

        self._token_usage["prompt_tokens"] += prompt_tokens
        self._token_usage["completion_tokens"] += completion_tokens
        self._token_usage["total_tokens"] += prompt_tokens + completion_tokens
        self._token_usage["successful_requests"] += 1
        self._token_usage["cached_prompt_tokens"] += cached_tokens

    def get_token_usage_summary(self) -> UsageMetrics:
        """Get summary of token usage for this LLM instance.

        Returns:
            Dictionary with token usage totals
        """
        return UsageMetrics(**self._token_usage)
```
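Taken together, `call` is the only abstract member; everything else on `BaseLLM` is reusable plumbing. Below is a minimal sketch of a custom provider built on this contract. The `EchoLLM` name and its echo behavior are hypothetical, and the sketch assumes the `LLMMeta` metaclass leaves plain user subclasses untouched.

```python
from crewai.llm.base_llm import BaseLLM


class EchoLLM(BaseLLM):
    """Toy provider that echoes the last user message (illustration only)."""

    def call(
        self,
        messages,
        tools=None,
        callbacks=None,
        available_functions=None,
        from_task=None,
        from_agent=None,
        response_model=None,
    ):
        # Inherited helper normalizes a bare string into [{"role": "user", ...}].
        formatted = self._format_messages(messages)
        reply = f"echo: {formatted[-1]['content']}"
        # Inherited helper truncates at the first configured stop sequence.
        return self._apply_stop_words(reply)


llm = EchoLLM(model="echo-1", stop=["Observation:"])
print(llm.call("Hello"))  # -> echo: Hello
```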
lib/crewai/src/crewai/llm/constants.py (new file, 587 lines; the long model lists are reflowed here with several entries per line)

```python
@@ -0,0 +1,587 @@
from typing import Literal, TypeAlias


SupportedNativeProviders: TypeAlias = Literal[
    "openai", "anthropic", "claude", "azure", "azure_openai",
    "google", "gemini", "bedrock", "aws",
]

SUPPORTED_NATIVE_PROVIDERS: list[SupportedNativeProviders] = [
    "openai", "anthropic", "claude", "azure", "azure_openai",
    "google", "gemini", "bedrock", "aws",
]


OpenAIModels: TypeAlias = Literal[
    "gpt-3.5-turbo", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-instruct", "gpt-3.5-turbo-instruct-0914",
    "gpt-4", "gpt-4-0125-preview", "gpt-4-0314", "gpt-4-0613",
    "gpt-4-1106-preview", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
    "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview",
    "gpt-4-vision-preview",
    "gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14",
    "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14",
    "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
    "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01",
    "gpt-4o-audio-preview-2024-12-17", "gpt-4o-audio-preview-2025-06-03",
    "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-audio-preview-2024-12-17", "gpt-4o-mini-realtime-preview",
    "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-4o-mini-search-preview",
    "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini-transcribe",
    "gpt-4o-mini-tts",
    "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01",
    "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-realtime-preview-2025-06-03",
    "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11",
    "gpt-4o-transcribe", "gpt-4o-transcribe-diarize",
    "gpt-5", "gpt-5-2025-08-07", "gpt-5-chat", "gpt-5-chat-latest", "gpt-5-codex",
    "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07",
    "gpt-5-pro", "gpt-5-pro-2025-10-06", "gpt-5-search-api",
    "gpt-5-search-api-2025-10-14",
    "gpt-audio", "gpt-audio-2025-08-28", "gpt-audio-mini", "gpt-audio-mini-2025-10-06",
    "gpt-image-1", "gpt-image-1-mini",
    "gpt-realtime", "gpt-realtime-2025-08-28", "gpt-realtime-mini",
    "gpt-realtime-mini-2025-10-06",
    "o1", "o1-preview", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12",
    "o1-pro", "o1-pro-2025-03-19", "o3-mini", "o3", "o4-mini",
    "whisper-1",
]
OPENAI_MODELS: list[OpenAIModels] = [
    "gpt-3.5-turbo", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-instruct", "gpt-3.5-turbo-instruct-0914",
    "gpt-4", "gpt-4-0125-preview", "gpt-4-0314", "gpt-4-0613",
    "gpt-4-1106-preview", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
    "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview",
    "gpt-4-vision-preview",
    "gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14",
    "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14",
    "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
    "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01",
    "gpt-4o-audio-preview-2024-12-17", "gpt-4o-audio-preview-2025-06-03",
    "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-audio-preview-2024-12-17", "gpt-4o-mini-realtime-preview",
    "gpt-4o-mini-realtime-preview-2024-12-17", "gpt-4o-mini-search-preview",
    "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini-transcribe",
    "gpt-4o-mini-tts",
    "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01",
    "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-realtime-preview-2025-06-03",
    "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11",
    "gpt-4o-transcribe", "gpt-4o-transcribe-diarize",
    "gpt-5", "gpt-5-2025-08-07", "gpt-5-chat", "gpt-5-chat-latest", "gpt-5-codex",
    "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07",
    "gpt-5-pro", "gpt-5-pro-2025-10-06", "gpt-5-search-api",
    "gpt-5-search-api-2025-10-14",
    "gpt-audio", "gpt-audio-2025-08-28", "gpt-audio-mini", "gpt-audio-mini-2025-10-06",
    "gpt-image-1", "gpt-image-1-mini",
    "gpt-realtime", "gpt-realtime-2025-08-28", "gpt-realtime-mini",
    "gpt-realtime-mini-2025-10-06",
    "o1", "o1-preview", "o1-2024-12-17", "o1-mini", "o1-mini-2024-09-12",
    "o1-pro", "o1-pro-2025-03-19", "o3-mini", "o3", "o4-mini",
    "whisper-1",
]


AnthropicModels: TypeAlias = Literal[
    "claude-3-7-sonnet-latest", "claude-3-7-sonnet-20250219",
    "claude-3-5-haiku-latest", "claude-3-5-haiku-20241022",
    "claude-haiku-4-5", "claude-haiku-4-5-20251001",
    "claude-sonnet-4-20250514", "claude-sonnet-4-0", "claude-4-sonnet-20250514",
    "claude-sonnet-4-5", "claude-sonnet-4-5-20250929",
    "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20241022",
    "claude-3-5-sonnet-20240620",
    "claude-opus-4-0", "claude-opus-4-20250514", "claude-4-opus-20250514",
    "claude-opus-4-1", "claude-opus-4-1-20250805",
    "claude-3-opus-latest", "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-latest", "claude-3-haiku-20240307",
]
ANTHROPIC_MODELS: list[AnthropicModels] = [
    "claude-3-7-sonnet-latest", "claude-3-7-sonnet-20250219",
    "claude-3-5-haiku-latest", "claude-3-5-haiku-20241022",
    "claude-haiku-4-5", "claude-haiku-4-5-20251001",
    "claude-sonnet-4-20250514", "claude-sonnet-4-0", "claude-4-sonnet-20250514",
    "claude-sonnet-4-5", "claude-sonnet-4-5-20250929",
    "claude-3-5-sonnet-latest", "claude-3-5-sonnet-20241022",
    "claude-3-5-sonnet-20240620",
    "claude-opus-4-0", "claude-opus-4-20250514", "claude-4-opus-20250514",
    "claude-opus-4-1", "claude-opus-4-1-20250805",
    "claude-3-opus-latest", "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-latest", "claude-3-haiku-20240307",
]

GeminiModels: TypeAlias = Literal[
    "gemini-2.5-pro", "gemini-2.5-pro-preview-03-25", "gemini-2.5-pro-preview-05-06",
    "gemini-2.5-pro-preview-06-05",
    "gemini-2.5-flash", "gemini-2.5-flash-preview-05-20",
    "gemini-2.5-flash-preview-04-17",
    "gemini-2.5-flash-image", "gemini-2.5-flash-image-preview",
    "gemini-2.5-flash-lite", "gemini-2.5-flash-lite-preview-06-17",
    "gemini-2.5-flash-preview-09-2025", "gemini-2.5-flash-lite-preview-09-2025",
    "gemini-2.5-flash-preview-tts", "gemini-2.5-pro-preview-tts",
    "gemini-2.5-computer-use-preview-10-2025",
    "gemini-2.0-flash", "gemini-2.0-flash-001", "gemini-2.0-flash-exp",
    "gemini-2.0-flash-exp-image-generation",
    "gemini-2.0-flash-lite", "gemini-2.0-flash-lite-001",
    "gemini-2.0-flash-lite-preview", "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-preview-image-generation",
    "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
    "gemini-2.0-flash-thinking-exp-1219",
    "gemini-2.0-pro-exp", "gemini-2.0-pro-exp-02-05", "gemini-exp-1206",
    "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b",
    "gemini-flash-latest", "gemini-flash-lite-latest", "gemini-pro-latest",
    "gemini-2.0-flash-live-001", "gemini-live-2.5-flash-preview",
    "gemini-2.5-flash-live-preview",
    "gemini-robotics-er-1.5-preview",
    "gemini-gemma-2-27b-it", "gemini-gemma-2-9b-it",
    "gemma-3-1b-it", "gemma-3-4b-it", "gemma-3-12b-it", "gemma-3-27b-it",
    "gemma-3n-e2b-it", "gemma-3n-e4b-it",
    "learnlm-2.0-flash-experimental",
]
GEMINI_MODELS: list[GeminiModels] = [
    "gemini-2.5-pro", "gemini-2.5-pro-preview-03-25", "gemini-2.5-pro-preview-05-06",
    "gemini-2.5-pro-preview-06-05",
    "gemini-2.5-flash", "gemini-2.5-flash-preview-05-20",
    "gemini-2.5-flash-preview-04-17",
    "gemini-2.5-flash-image", "gemini-2.5-flash-image-preview",
    "gemini-2.5-flash-lite", "gemini-2.5-flash-lite-preview-06-17",
    "gemini-2.5-flash-preview-09-2025", "gemini-2.5-flash-lite-preview-09-2025",
    "gemini-2.5-flash-preview-tts", "gemini-2.5-pro-preview-tts",
    "gemini-2.5-computer-use-preview-10-2025",
    "gemini-2.0-flash", "gemini-2.0-flash-001", "gemini-2.0-flash-exp",
    "gemini-2.0-flash-exp-image-generation",
    "gemini-2.0-flash-lite", "gemini-2.0-flash-lite-001",
    "gemini-2.0-flash-lite-preview", "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-preview-image-generation",
    "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-01-21",
    "gemini-2.0-flash-thinking-exp-1219",
    "gemini-2.0-pro-exp", "gemini-2.0-pro-exp-02-05", "gemini-exp-1206",
    "gemini-1.5-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b",
    "gemini-flash-latest", "gemini-flash-lite-latest", "gemini-pro-latest",
    "gemini-2.0-flash-live-001", "gemini-live-2.5-flash-preview",
    "gemini-2.5-flash-live-preview",
    "gemini-robotics-er-1.5-preview",
    "gemini-gemma-2-27b-it", "gemini-gemma-2-9b-it",
    "gemma-3-1b-it", "gemma-3-4b-it", "gemma-3-12b-it", "gemma-3-27b-it",
    "gemma-3n-e2b-it", "gemma-3n-e4b-it",
    "learnlm-2.0-flash-experimental",
]


AzureModels: TypeAlias = Literal[
    "gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613",
    "gpt-35-turbo", "gpt-35-turbo-0125", "gpt-35-turbo-1106",
    "gpt-35-turbo-16k-0613", "gpt-35-turbo-instruct-0914",
    "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-0125-preview",
    "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
    "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-vision",
    "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
    "gpt-4o-mini",
    "gpt-5", "o1", "o1-mini", "o1-preview", "o3-mini", "o3", "o4-mini",
]
AZURE_MODELS: list[AzureModels] = [
    "gpt-3.5-turbo", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613",
    "gpt-35-turbo", "gpt-35-turbo-0125", "gpt-35-turbo-1106",
    "gpt-35-turbo-16k-0613", "gpt-35-turbo-instruct-0914",
    "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-0125-preview",
    "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
    "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-vision",
    "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20",
    "gpt-4o-mini",
    "gpt-5", "o1", "o1-mini", "o1-preview", "o3-mini", "o3", "o4-mini",
]


BedrockModels: TypeAlias = Literal[
    "ai21.jamba-1-5-large-v1:0", "ai21.jamba-1-5-mini-v1:0",
    "amazon.nova-lite-v1:0", "amazon.nova-lite-v1:0:24k", "amazon.nova-lite-v1:0:300k",
    "amazon.nova-micro-v1:0", "amazon.nova-micro-v1:0:128k",
    "amazon.nova-micro-v1:0:24k",
    "amazon.nova-premier-v1:0", "amazon.nova-premier-v1:0:1000k",
    "amazon.nova-premier-v1:0:20k", "amazon.nova-premier-v1:0:8k",
    "amazon.nova-premier-v1:0:mm",
    "amazon.nova-pro-v1:0", "amazon.nova-pro-v1:0:24k", "amazon.nova-pro-v1:0:300k",
    "amazon.titan-text-express-v1", "amazon.titan-text-express-v1:0:8k",
    "amazon.titan-text-lite-v1", "amazon.titan-text-lite-v1:0:4k",
    "amazon.titan-tg1-large",
    "anthropic.claude-3-5-haiku-20241022-v1:0",
    "anthropic.claude-3-5-sonnet-20240620-v1:0",
    "anthropic.claude-3-5-sonnet-20241022-v2:0",
    "anthropic.claude-3-7-sonnet-20250219-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0:200k",
    "anthropic.claude-3-haiku-20240307-v1:0:48k",
    "anthropic.claude-3-opus-20240229-v1:0",
    "anthropic.claude-3-opus-20240229-v1:0:12k",
    "anthropic.claude-3-opus-20240229-v1:0:200k",
    "anthropic.claude-3-opus-20240229-v1:0:28k",
    "anthropic.claude-3-sonnet-20240229-v1:0",
    "anthropic.claude-3-sonnet-20240229-v1:0:200k",
    "anthropic.claude-3-sonnet-20240229-v1:0:28k",
    "anthropic.claude-haiku-4-5-20251001-v1:0",
    "anthropic.claude-instant-v1:2:100k",
    "anthropic.claude-opus-4-1-20250805-v1:0",
    "anthropic.claude-opus-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-5-20250929-v1:0",
    "anthropic.claude-v2:0:100k", "anthropic.claude-v2:0:18k",
    "anthropic.claude-v2:1:18k", "anthropic.claude-v2:1:200k",
    "cohere.command-r-plus-v1:0", "cohere.command-r-v1:0", "cohere.rerank-v3-5:0",
    "deepseek.r1-v1:0",
    "meta.llama3-1-70b-instruct-v1:0", "meta.llama3-1-8b-instruct-v1:0",
    "meta.llama3-2-11b-instruct-v1:0", "meta.llama3-2-1b-instruct-v1:0",
    "meta.llama3-2-3b-instruct-v1:0", "meta.llama3-2-90b-instruct-v1:0",
    "meta.llama3-3-70b-instruct-v1:0", "meta.llama3-70b-instruct-v1:0",
    "meta.llama3-8b-instruct-v1:0",
    "meta.llama4-maverick-17b-instruct-v1:0", "meta.llama4-scout-17b-instruct-v1:0",
    "mistral.mistral-7b-instruct-v0:2", "mistral.mistral-large-2402-v1:0",
    "mistral.mistral-small-2402-v1:0", "mistral.mixtral-8x7b-instruct-v0:1",
    "mistral.pixtral-large-2502-v1:0",
    "openai.gpt-oss-120b-1:0", "openai.gpt-oss-20b-1:0",
    "qwen.qwen3-32b-v1:0", "qwen.qwen3-coder-30b-a3b-v1:0",
    "twelvelabs.pegasus-1-2-v1:0",
]
BEDROCK_MODELS: list[BedrockModels] = [
    "ai21.jamba-1-5-large-v1:0", "ai21.jamba-1-5-mini-v1:0",
    "amazon.nova-lite-v1:0", "amazon.nova-lite-v1:0:24k", "amazon.nova-lite-v1:0:300k",
    "amazon.nova-micro-v1:0", "amazon.nova-micro-v1:0:128k",
    "amazon.nova-micro-v1:0:24k",
    "amazon.nova-premier-v1:0", "amazon.nova-premier-v1:0:1000k",
    "amazon.nova-premier-v1:0:20k", "amazon.nova-premier-v1:0:8k",
    "amazon.nova-premier-v1:0:mm",
    "amazon.nova-pro-v1:0", "amazon.nova-pro-v1:0:24k", "amazon.nova-pro-v1:0:300k",
    "amazon.titan-text-express-v1", "amazon.titan-text-express-v1:0:8k",
    "amazon.titan-text-lite-v1", "amazon.titan-text-lite-v1:0:4k",
    "amazon.titan-tg1-large",
    "anthropic.claude-3-5-haiku-20241022-v1:0",
    "anthropic.claude-3-5-sonnet-20240620-v1:0",
    "anthropic.claude-3-5-sonnet-20241022-v2:0",
    "anthropic.claude-3-7-sonnet-20250219-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0:200k",
    "anthropic.claude-3-haiku-20240307-v1:0:48k",
    "anthropic.claude-3-opus-20240229-v1:0",
    "anthropic.claude-3-opus-20240229-v1:0:12k",
    "anthropic.claude-3-opus-20240229-v1:0:200k",
    "anthropic.claude-3-opus-20240229-v1:0:28k",
    "anthropic.claude-3-sonnet-20240229-v1:0",
    "anthropic.claude-3-sonnet-20240229-v1:0:200k",
    "anthropic.claude-3-sonnet-20240229-v1:0:28k",
    "anthropic.claude-haiku-4-5-20251001-v1:0",
    "anthropic.claude-instant-v1:2:100k",
    "anthropic.claude-opus-4-1-20250805-v1:0",
    "anthropic.claude-opus-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-5-20250929-v1:0",
    "anthropic.claude-v2:0:100k", "anthropic.claude-v2:0:18k",
    "anthropic.claude-v2:1:18k", "anthropic.claude-v2:1:200k",
    "cohere.command-r-plus-v1:0", "cohere.command-r-v1:0", "cohere.rerank-v3-5:0",
    "deepseek.r1-v1:0",
    "meta.llama3-1-70b-instruct-v1:0", "meta.llama3-1-8b-instruct-v1:0",
    "meta.llama3-2-11b-instruct-v1:0", "meta.llama3-2-1b-instruct-v1:0",
    "meta.llama3-2-3b-instruct-v1:0", "meta.llama3-2-90b-instruct-v1:0",
    "meta.llama3-3-70b-instruct-v1:0", "meta.llama3-70b-instruct-v1:0",
    "meta.llama3-8b-instruct-v1:0",
    "meta.llama4-maverick-17b-instruct-v1:0", "meta.llama4-scout-17b-instruct-v1:0",
    "mistral.mistral-7b-instruct-v0:2", "mistral.mistral-large-2402-v1:0",
    "mistral.mistral-small-2402-v1:0", "mistral.mixtral-8x7b-instruct-v0:1",
    "mistral.pixtral-large-2502-v1:0",
    "openai.gpt-oss-120b-1:0", "openai.gpt-oss-20b-1:0",
    "qwen.qwen3-32b-v1:0", "qwen.qwen3-coder-30b-a3b-v1:0",
    "twelvelabs.pegasus-1-2-v1:0",
]

SupportedModels: TypeAlias = (
    OpenAIModels | AnthropicModels | GeminiModels | AzureModels | BedrockModels
)
```
```python
@@ -20,9 +20,7 @@ from typing import (
)

from dotenv import load_dotenv
import httpx
from pydantic import BaseModel, Field
from typing_extensions import Self

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import (
@@ -37,14 +35,7 @@ from crewai.events.types.tool_usage_events import (
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)
from crewai.llms.base_llm import BaseLLM
from crewai.llms.constants import (
    ANTHROPIC_MODELS,
    AZURE_MODELS,
    BEDROCK_MODELS,
    GEMINI_MODELS,
    OPENAI_MODELS,
)
from crewai.llm.base_llm import BaseLLM
from crewai.utilities import InternalInstructor
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededError,
@@ -61,7 +52,6 @@ if TYPE_CHECKING:
    from litellm.utils import supports_response_schema

    from crewai.agent.core import Agent
    from crewai.llms.hooks.base import BaseInterceptor
    from crewai.task import Task
    from crewai.tools.base_tool import BaseTool
    from crewai.utilities.types import LLMMessage
@@ -327,249 +317,57 @@ class AccumulatedToolArgs(BaseModel):


class LLM(BaseLLM):
    completion_cost: float | None = None
    """LiteLLM-based LLM implementation for CrewAI.

    def __new__(cls, model: str, is_litellm: bool = False, **kwargs: Any) -> LLM:
        """Factory method that routes to native SDK or falls back to LiteLLM.
    This class provides LiteLLM integration for models not covered by native providers.
    The metaclass (LLMMeta) automatically routes to native providers when appropriate.
    """

        Routing priority:
        1. If 'provider' kwarg is present, use that provider with constants
        2. If only 'model' kwarg, use constants to infer provider
        3. If "/" in model name:
           - Check if prefix is a native provider (openai/anthropic/azure/bedrock/gemini)
           - If yes, validate model against constants
           - If valid, route to native SDK; otherwise route to LiteLLM
        """
        if not model or not isinstance(model, str):
            raise ValueError("Model must be a non-empty string")
    # LiteLLM-specific fields
    completion_cost: float | None = Field(None, description="Cost of completion")
    timeout: float | int | None = Field(None, description="Request timeout")
    top_p: float | None = Field(None, description="Top-p sampling parameter")
    n: int | None = Field(None, description="Number of completions to generate")
    max_completion_tokens: int | None = Field(
        None, description="Maximum completion tokens"
    )
    max_tokens: int | float | None = Field(None, description="Maximum total tokens")
    presence_penalty: float | None = Field(None, description="Presence penalty")
    frequency_penalty: float | None = Field(None, description="Frequency penalty")
    logit_bias: dict[int, float] | None = Field(None, description="Logit bias")
    response_format: type[BaseModel] | None = Field(
        None, description="Response format model"
    )
    seed: int | None = Field(None, description="Random seed for reproducibility")
    logprobs: int | None = Field(None, description="Log probabilities to return")
    top_logprobs: int | None = Field(None, description="Top log probabilities")
    api_base: str | None = Field(None, description="API base URL (alias for base_url)")
    api_version: str | None = Field(None, description="API version")
    callbacks: list[Any] | None = Field(None, description="Callback functions")
    context_window_size: int = Field(0, description="Context window size in tokens")
    reasoning_effort: Literal["none", "low", "medium", "high"] | None = Field(
        None, description="Reasoning effort level"
    )
    is_anthropic: bool = Field(False, description="Whether model is from Anthropic")
    stream: bool = Field(False, description="Whether to stream responses")

        explicit_provider = kwargs.get("provider")

        if explicit_provider:
            provider = explicit_provider
            use_native = True
            model_string = model
        elif "/" in model:
            prefix, _, model_part = model.partition("/")

            provider_mapping = {
                "openai": "openai",
                "anthropic": "anthropic",
                "claude": "anthropic",
                "azure": "azure",
                "azure_openai": "azure",
                "google": "gemini",
                "gemini": "gemini",
                "bedrock": "bedrock",
                "aws": "bedrock",
            }

            canonical_provider = provider_mapping.get(prefix.lower())

            if canonical_provider and cls._validate_model_in_constants(
                model_part, canonical_provider
            ):
                provider = canonical_provider
                use_native = True
                model_string = model_part
            else:
                provider = prefix
                use_native = False
                model_string = model_part
        else:
            provider = cls._infer_provider_from_model(model)
            use_native = True
            model_string = model

        native_class = cls._get_native_provider(provider) if use_native else None
        if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
            try:
                # Remove 'provider' from kwargs if it exists to avoid duplicate keyword argument
                kwargs_copy = {k: v for k, v in kwargs.items() if k != 'provider'}
                return cast(
                    Self, native_class(model=model_string, provider=provider, **kwargs_copy)
                )
            except NotImplementedError:
                raise
            except Exception as e:
                raise ImportError(f"Error importing native provider: {e}") from e

        # FALLBACK to LiteLLM
        if not LITELLM_AVAILABLE:
            logger.error("LiteLLM is not available, falling back to LiteLLM")
            raise ImportError("Fallback to LiteLLM is not available") from None

        instance = object.__new__(cls)
        super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs)
        instance.is_litellm = True
        return instance

    @classmethod
    def _validate_model_in_constants(cls, model: str, provider: str) -> bool:
        """Validate if a model name exists in the provider's constants.
    def model_post_init(self, __context: Any) -> None:
        """Initialize LiteLLM-specific settings after model initialization.

        Args:
            model: The model name to validate
            provider: The provider to check against (canonical name)

        Returns:
            True if the model exists in the provider's constants, False otherwise
            __context: Pydantic context
        """
        if provider == "openai":
            return model in OPENAI_MODELS
        super().model_post_init(__context)

        if provider == "anthropic" or provider == "claude":
            return model in ANTHROPIC_MODELS
        # Configure LiteLLM
        if LITELLM_AVAILABLE:
            litellm.drop_params = True

        if provider == "gemini":
            return model in GEMINI_MODELS
        # Determine if this is an Anthropic model
        self.is_anthropic = self._is_anthropic_model(self.model)

        if provider == "bedrock":
            return model in BEDROCK_MODELS

        if provider == "azure":
            # azure does not provide a list of available models, determine a better way to handle this
            return True

        return False

    @classmethod
    def _infer_provider_from_model(cls, model: str) -> str:
        """Infer the provider from the model name.

        Args:
            model: The model name without provider prefix

        Returns:
            The inferred provider name, defaults to "openai"
        """

        if model in OPENAI_MODELS:
            return "openai"

        if model in ANTHROPIC_MODELS:
            return "anthropic"

        if model in GEMINI_MODELS:
            return "gemini"

        if model in BEDROCK_MODELS:
            return "bedrock"

        if model in AZURE_MODELS:
            return "azure"

        return "openai"

    @classmethod
    def _get_native_provider(cls, provider: str) -> type | None:
        """Get native provider class if available."""
        if provider == "openai":
            from crewai.llms.providers.openai.completion import OpenAICompletion

            return OpenAICompletion

        if provider == "anthropic" or provider == "claude":
            from crewai.llms.providers.anthropic.completion import (
                AnthropicCompletion,
            )

            return AnthropicCompletion

        if provider == "azure" or provider == "azure_openai":
            from crewai.llms.providers.azure.completion import AzureCompletion

            return AzureCompletion

        if provider == "google" or provider == "gemini":
            from crewai.llms.providers.gemini.completion import GeminiCompletion

            return GeminiCompletion

        if provider == "bedrock":
            from crewai.llms.providers.bedrock.completion import BedrockCompletion

            return BedrockCompletion

        return None

    def __init__(
        self,
        model: str,
        timeout: float | int | None = None,
        temperature: float | None = None,
        top_p: float | None = None,
        n: int | None = None,
        stop: str | list[str] | None = None,
        max_completion_tokens: int | None = None,
        max_tokens: int | float | None = None,
        presence_penalty: float | None = None,
        frequency_penalty: float | None = None,
        logit_bias: dict[int, float] | None = None,
        response_format: type[BaseModel] | None = None,
        seed: int | None = None,
        logprobs: int | None = None,
        top_logprobs: int | None = None,
        base_url: str | None = None,
        api_base: str | None = None,
        api_version: str | None = None,
        api_key: str | None = None,
        callbacks: list[Any] | None = None,
        reasoning_effort: Literal["none", "low", "medium", "high"] | None = None,
        stream: bool = False,
        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize LLM instance.

        Note: This __init__ method is only called for fallback instances.
        Native provider instances handle their own initialization in their respective classes.
        """
        super().__init__(
            model=model,
            temperature=temperature,
            api_key=api_key,
            base_url=base_url,
            timeout=timeout,
            **kwargs,
        )
        self.model = model
        self.timeout = timeout
        self.temperature = temperature
        self.top_p = top_p
        self.n = n
        self.max_completion_tokens = max_completion_tokens
        self.max_tokens = max_tokens
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.logit_bias = logit_bias
        self.response_format = response_format
        self.seed = seed
        self.logprobs = logprobs
        self.top_logprobs = top_logprobs
        self.base_url = base_url
        self.api_base = api_base
        self.api_version = api_version
        self.api_key = api_key
        self.callbacks = callbacks
        self.context_window_size = 0
        self.reasoning_effort = reasoning_effort
        self.additional_params = kwargs
        self.is_anthropic = self._is_anthropic_model(model)
        self.stream = stream
        self.interceptor = interceptor

        litellm.drop_params = True

        # Normalize self.stop to always be a list[str]
        if stop is None:
            self.stop: list[str] = []
        elif isinstance(stop, str):
            self.stop = [stop]
        else:
            self.stop = stop

        self.set_callbacks(callbacks or [])
        # Set up callbacks
        self.set_callbacks(self.callbacks or [])
        self.set_env_callbacks()

    @staticmethod
@@ -1649,7 +1447,7 @@ class LLM(BaseLLM):
            **filtered_params,
        )

    def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM:
    def __deepcopy__(self, memo: dict[int, Any] | None) -> LLM:  # type: ignore[override]
        """Create a deep copy of the LLM instance."""
        import copy
```
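Both sides of this refactor keep the same observable contract: routing happens at construction time, whether in the removed `__new__` factory or in the `LLMMeta` metaclass that replaces it. A hedged sketch of caller-side behavior, using only the routing rules spelled out in the hunk above (the model strings are illustrative, and the concrete classes returned are internal details):

```python
from crewai.llm.core import LLM

# Bare model name found in OPENAI_MODELS: provider is inferred as "openai"
# and the native OpenAI completion class handles the call.
native = LLM(model="gpt-4o")

# Prefix maps to a native provider and the model validates against the
# constants: the prefix is stripped and the native SDK path is used.
claude = LLM(model="anthropic/claude-3-5-sonnet-latest")

# Prefix outside SUPPORTED_NATIVE_PROVIDERS: falls back to LiteLLM.
other = LLM(model="groq/llama-3.1-8b-instant")
assert other.is_litellm
```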
lib/crewai/src/crewai/llm/hooks/__init__.py (new file, 6 lines)

```python
@@ -0,0 +1,6 @@
"""Interceptor contracts for crewai"""

from crewai.llm.hooks.base import BaseInterceptor


__all__ = ["BaseInterceptor"]
```
lib/crewai/src/crewai/llm/hooks/base.py (new file, 133 lines)

```python
@@ -0,0 +1,133 @@
"""Base classes for LLM transport interceptors.

This module provides abstract base classes for intercepting and modifying
outbound and inbound messages at the transport level.
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, TypeVar

from pydantic_core import core_schema


if TYPE_CHECKING:
    from pydantic import GetCoreSchemaHandler
    from pydantic_core import CoreSchema


T = TypeVar("T")
U = TypeVar("U")


class BaseInterceptor(ABC, Generic[T, U]):
    """Abstract base class for intercepting transport-level messages.

    Provides hooks to intercept and modify outbound and inbound messages
    at the transport layer.

    Type parameters:
        T: Outbound message type (e.g., httpx.Request)
        U: Inbound message type (e.g., httpx.Response)

    Example:
        >>> import httpx
        >>> class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
        ...     def on_outbound(self, message: httpx.Request) -> httpx.Request:
        ...         message.headers["X-Custom-Header"] = "value"
        ...         return message
        ...
        ...     def on_inbound(self, message: httpx.Response) -> httpx.Response:
        ...         print(f"Status: {message.status_code}")
        ...         return message
    """

    @abstractmethod
    def on_outbound(self, message: T) -> T:
        """Intercept outbound message before sending.

        Args:
            message: Outbound message object.

        Returns:
            Modified message object.
        """
        ...

    @abstractmethod
    def on_inbound(self, message: U) -> U:
        """Intercept inbound message after receiving.

        Args:
            message: Inbound message object.

        Returns:
            Modified message object.
        """
        ...

    async def aon_outbound(self, message: T) -> T:
        """Async version of on_outbound.

        Args:
            message: Outbound message object.

        Returns:
            Modified message object.
        """
        raise NotImplementedError

    async def aon_inbound(self, message: U) -> U:
        """Async version of on_inbound.

        Args:
            message: Inbound message object.

        Returns:
            Modified message object.
        """
        raise NotImplementedError

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: GetCoreSchemaHandler
    ) -> CoreSchema:
        """Generate Pydantic core schema for BaseInterceptor.

        This allows the generic BaseInterceptor to be used in Pydantic models
        without requiring arbitrary_types_allowed=True. The schema validates
        that the value is an instance of BaseInterceptor.

        Args:
            _source_type: The source type being validated (unused).
            _handler: Handler for generating schemas (unused).

        Returns:
            A Pydantic core schema that validates BaseInterceptor instances.
        """
        return core_schema.no_info_plain_validator_function(
            _validate_interceptor,
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda x: x, return_schema=core_schema.any_schema()
            ),
        )


def _validate_interceptor(value: Any) -> BaseInterceptor[T, U]:
    """Validate that the value is a BaseInterceptor instance.

    Args:
        value: The value to validate.

    Returns:
        The validated BaseInterceptor instance.

    Raises:
        ValueError: If the value is not a BaseInterceptor instance.
    """
    if not isinstance(value, BaseInterceptor):
        raise ValueError(
            f"Expected BaseInterceptor instance, got {type(value).__name__}"
        )
    return value
```
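Since `BaseLLM` declares an `interceptor` field typed as `BaseInterceptor[httpx.Request, httpx.Response]`, a concrete interceptor plugs straight into an LLM instance. A short sketch along the lines of the docstring example above (the header name is illustrative, and whether a given provider path actually honors the interceptor depends on that provider's transport wiring):

```python
import httpx

from crewai.llm.core import LLM
from crewai.llm.hooks import BaseInterceptor


class HeaderInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Tag outbound requests and log inbound status codes."""

    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        message.headers["X-Request-Source"] = "crewai"  # illustrative header
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        print(f"LLM HTTP status: {message.status_code}")
        return message


llm = LLM(model="gpt-4o", interceptor=HeaderInterceptor())
```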
123
lib/crewai/src/crewai/llm/hooks/transport.py
Normal file
123
lib/crewai/src/crewai/llm/hooks/transport.py
Normal file
@@ -0,0 +1,123 @@
"""HTTP transport implementations for LLM request/response interception.

This module provides internal transport classes that integrate with BaseInterceptor
to enable request/response modification at the transport level.
"""

from __future__ import annotations

from collections.abc import Iterable
from typing import TYPE_CHECKING, TypedDict

from httpx import (
    AsyncHTTPTransport as _AsyncHTTPTransport,
    HTTPTransport as _HTTPTransport,
)
from typing_extensions import NotRequired, Unpack


if TYPE_CHECKING:
    from ssl import SSLContext

    from httpx import Limits, Request, Response
    from httpx._types import CertTypes, ProxyTypes

    from crewai.llm.hooks.base import BaseInterceptor


class HTTPTransportKwargs(TypedDict, total=False):
    """Typed dictionary for httpx.HTTPTransport initialization parameters.

    These parameters configure the underlying HTTP transport behavior including
    SSL verification, proxies, connection limits, and low-level socket options.
    """

    verify: bool | str | SSLContext
    cert: NotRequired[CertTypes]
    trust_env: bool
    http1: bool
    http2: bool
    limits: Limits
    proxy: NotRequired[ProxyTypes]
    uds: NotRequired[str]
    local_address: NotRequired[str]
    retries: int
    socket_options: NotRequired[
        Iterable[
            tuple[int, int, int]
            | tuple[int, int, bytes | bytearray]
            | tuple[int, int, None, int]
        ]
    ]


class HTTPTransport(_HTTPTransport):
    """HTTP transport that uses an interceptor for request/response modification.

    This transport is used internally when a user provides a BaseInterceptor.
    Users should not instantiate this class directly - instead, pass an interceptor
    to the LLM client and this transport will be created automatically.
    """

    def __init__(
        self,
        interceptor: BaseInterceptor[Request, Response],
        **kwargs: Unpack[HTTPTransportKwargs],
    ) -> None:
        """Initialize transport with interceptor.

        Args:
            interceptor: HTTP interceptor for modifying raw request/response objects.
            **kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
        """
        super().__init__(**kwargs)
        self.interceptor = interceptor

    def handle_request(self, request: Request) -> Response:
        """Handle request with interception.

        Args:
            request: The HTTP request to handle.

        Returns:
            The HTTP response.
        """
        request = self.interceptor.on_outbound(request)
        response = super().handle_request(request)
        return self.interceptor.on_inbound(response)


class AsyncHTTPTransport(_AsyncHTTPTransport):
    """Async HTTP transport that uses an interceptor for request/response modification.

    This transport is used internally when a user provides a BaseInterceptor.
    Users should not instantiate this class directly - instead, pass an interceptor
    to the LLM client and this transport will be created automatically.
    """

    def __init__(
        self,
        interceptor: BaseInterceptor[Request, Response],
        **kwargs: Unpack[HTTPTransportKwargs],
    ) -> None:
        """Initialize async transport with interceptor.

        Args:
            interceptor: HTTP interceptor for modifying raw request/response objects.
            **kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
        """
        super().__init__(**kwargs)
        self.interceptor = interceptor

    async def handle_async_request(self, request: Request) -> Response:
        """Handle async request with interception.

        Args:
            request: The HTTP request to handle.

        Returns:
            The HTTP response.
        """
        request = await self.interceptor.aon_outbound(request)
        response = await super().handle_async_request(request)
        return await self.interceptor.aon_inbound(response)
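Although the library constructs these transports itself, wiring one into a bare `httpx.Client` shows exactly where the hooks fire. A minimal sketch, reusing the hypothetical `TracingInterceptor` from the earlier example:

```python
import httpx

from crewai.llm.hooks.transport import HTTPTransport

# TracingInterceptor is the sketch subclass defined in the earlier example.
transport = HTTPTransport(interceptor=TracingInterceptor(), retries=2)
client = httpx.Client(transport=transport)

# on_outbound runs before the request leaves; on_inbound runs on the response.
response = client.get("https://example.com")
```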
lib/crewai/src/crewai/llm/internal/constants.py (new file, +14 lines)
@@ -0,0 +1,14 @@
from crewai.llm.constants import SupportedNativeProviders


PROVIDER_MAPPING: dict[str, SupportedNativeProviders] = {
    "openai": "openai",
    "anthropic": "anthropic",
    "claude": "anthropic",
    "azure": "azure",
    "azure_openai": "azure",
    "google": "gemini",
    "gemini": "gemini",
    "bedrock": "bedrock",
    "aws": "bedrock",
}
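The mapping exists to canonicalize prefix aliases before routing; the metaclass below consumes it roughly like this:

```python
from crewai.llm.internal.constants import PROVIDER_MAPPING

prefix, _, model_part = "claude/claude-3-5-sonnet-20241022".partition("/")
assert PROVIDER_MAPPING.get(prefix.lower()) == "anthropic"
```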
lib/crewai/src/crewai/llm/internal/meta.py (new file, +251 lines)
@@ -0,0 +1,251 @@
"""Metaclass for LLM provider routing.

This metaclass enables automatic routing to native provider implementations
based on the model parameter at instantiation time.
"""

from __future__ import annotations

import logging
from typing import Any, cast

from pydantic import ConfigDict
from pydantic._internal._model_construction import ModelMetaclass

from crewai.llm.constants import (
    ANTHROPIC_MODELS,
    AZURE_MODELS,
    BEDROCK_MODELS,
    GEMINI_MODELS,
    OPENAI_MODELS,
    SUPPORTED_NATIVE_PROVIDERS,
    SupportedModels,
    SupportedNativeProviders,
)
from crewai.llm.internal.constants import PROVIDER_MAPPING


class LLMMeta(ModelMetaclass):
    """Metaclass for LLM that handles provider routing.

    This metaclass intercepts LLM instantiation and routes to the appropriate
    native provider implementation based on the model parameter.
    """

    def __new__(
        mcs,
        name: str,
        bases: tuple[type, ...],
        namespace: dict[str, Any],
        **kwargs: Any,
    ) -> type:
        """Create new LLM class with proper model_config for custom LLMs.

        Args:
            name: Class name
            bases: Base classes
            namespace: Class namespace
            **kwargs: Additional arguments

        Returns:
            New class
        """
        if name != "BaseLLM" and any(
            base.__name__ in ("BaseLLM", "LLM") for base in bases
        ):
            if "model_config" not in namespace:
                namespace["model_config"] = ConfigDict(
                    extra="allow", populate_by_name=True
                )
            elif isinstance(namespace["model_config"], dict):
                config_dict = cast(
                    ConfigDict, cast(object, dict(namespace["model_config"]))
                )
                config_dict.setdefault("extra", "allow")
                config_dict.setdefault("populate_by_name", True)
                namespace["model_config"] = ConfigDict(**config_dict)

        return super().__new__(mcs, name, bases, namespace)

    def __call__(cls, *args: Any, **kwargs: Any) -> Any:  # noqa: N805
        """Route to appropriate provider implementation at instantiation time.

        Args:
            *args: Positional arguments (model should be first for LLM class)
            **kwargs: Keyword arguments including model, is_litellm, etc.

        Returns:
            Instance of the appropriate provider class or LLM class

        Raises:
            ValueError: If model is not a valid string
        """
        if cls.__name__ != "LLM":
            return super().__call__(*args, **kwargs)

        model = cast(
            str | SupportedModels | None,
            (kwargs.get("model") or (args[0] if args else None)),
        )
        is_litellm = kwargs.get("is_litellm", False)

        if not model or not isinstance(model, str):
            raise ValueError("Model must be a non-empty string")

        if args and not kwargs.get("model"):
            kwargs["model"] = cast(SupportedModels, args[0])
            _ = args[1:]
        explicit_provider = cast(SupportedNativeProviders, kwargs.get("provider"))

        if explicit_provider:
            provider = explicit_provider
            use_native = True
            model_string = model
        elif "/" in model:
            prefix, _, model_part = cast(
                tuple[SupportedNativeProviders, Any, SupportedModels],
                model.partition("/"),
            )

            canonical_provider = PROVIDER_MAPPING.get(prefix.lower())

            if canonical_provider and cls._validate_model_in_constants(
                model_part, canonical_provider
            ):
                provider = canonical_provider
                use_native = True
                model_string = model_part
            else:
                provider = prefix
                use_native = False
                model_string = model_part
        else:
            provider = cls._infer_provider_from_model(model)
            use_native = True
            model_string = model

        native_class = cls._get_native_provider(provider) if use_native else None
        if native_class and not is_litellm and provider in SUPPORTED_NATIVE_PROVIDERS:
            try:
                kwargs_copy = {
                    k: v for k, v in kwargs.items() if k not in ("provider", "model")
                }
                return native_class(
                    model=model_string, provider=provider, **kwargs_copy
                )
            except NotImplementedError:
                raise
            except Exception as e:
                raise ImportError(f"Error importing native provider: {e}") from e

        try:
            import litellm  # noqa: F401
        except ImportError:
            logging.error("LiteLLM fallback requested, but litellm is not installed")
            raise ImportError("Fallback to LiteLLM is not available") from None

        kwargs_copy = {
            k: v for k, v in kwargs.items() if k not in ("model", "is_litellm")
        }
        return super().__call__(model=model, is_litellm=True, **kwargs_copy)

    @staticmethod
    def _validate_model_in_constants(
        model: SupportedModels, provider: SupportedNativeProviders | None
    ) -> bool:
        """Validate if a model name exists in the provider's constants.

        Args:
            model: The model name to validate
            provider: The provider to check against (canonical name)

        Returns:
            True if the model exists in the provider's constants, False otherwise
        """

        if provider == "openai":
            return model in OPENAI_MODELS

        if provider == "anthropic" or provider == "claude":
            return model in ANTHROPIC_MODELS

        if provider == "gemini":
            return model in GEMINI_MODELS

        if provider == "bedrock":
            return model in BEDROCK_MODELS

        if provider == "azure":
            # azure does not provide a list of available models
            return True

        return False

    @staticmethod
    def _infer_provider_from_model(
        model: SupportedModels | str,
    ) -> SupportedNativeProviders:
        """Infer the provider from the model name.

        Args:
            model: The model name without provider prefix

        Returns:
            The inferred provider name, defaults to "openai"
        """

        if model in OPENAI_MODELS:
            return "openai"

        if model in ANTHROPIC_MODELS:
            return "anthropic"

        if model in GEMINI_MODELS:
            return "gemini"

        if model in BEDROCK_MODELS:
            return "bedrock"

        if model in AZURE_MODELS:
            return "azure"

        return "openai"

    @staticmethod
    def _get_native_provider(provider: SupportedNativeProviders | None) -> type | None:
        """Get native provider class if available.

        Args:
            provider: The provider name

        Returns:
            The provider class or None if not available
        """
        if provider == "openai":
            from crewai.llm.providers.openai.completion import OpenAICompletion

            return OpenAICompletion

        if provider == "anthropic" or provider == "claude":
            from crewai.llm.providers.anthropic.completion import (
                AnthropicCompletion,
            )

            return AnthropicCompletion

        if provider == "azure" or provider == "azure_openai":
            from crewai.llm.providers.azure.completion import AzureCompletion

            return AzureCompletion

        if provider == "google" or provider == "gemini":
            from crewai.llm.providers.gemini.completion import GeminiCompletion

            return GeminiCompletion

        if provider == "bedrock":
            from crewai.llm.providers.bedrock.completion import BedrockCompletion

            return BedrockCompletion

        return None
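Put together, the routing behaves roughly as follows. A sketch of the observable behavior under stated assumptions: `LLM` is exported from the package root (as in the docs elsewhere in this repo), `gpt-4o` appears in `OPENAI_MODELS`, and the anthropic extra is installed.

```python
from crewai import LLM

# Known prefix and a model found in ANTHROPIC_MODELS: routed to the native class.
llm = LLM("anthropic/claude-3-5-sonnet-20241022")
print(type(llm).__name__)  # AnthropicCompletion

# Bare model name: provider inferred from the model constants (openai here).
llm = LLM("gpt-4o")

# Unrecognized prefix: falls through to the LiteLLM path with is_litellm=True.
llm = LLM("some-gateway/some-model")
```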
@@ -2,14 +2,18 @@ from __future__ import annotations

import json
import logging
import os
from typing import TYPE_CHECKING, Any, cast

from pydantic import BaseModel
from dotenv import load_dotenv
import httpx
from pydantic import BaseModel, Field, PrivateAttr, model_validator
from typing_extensions import Self

from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import HTTPTransport
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO
from crewai.llm.hooks.transport import HTTPTransport
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededError,
@@ -18,114 +22,85 @@ from crewai.utilities.types import LLMMessage


if TYPE_CHECKING:
    from crewai.llms.hooks.base import BaseInterceptor
    from anthropic.types import Message

    from crewai.agent.core import Agent
    from crewai.task import Task


try:
    from anthropic import Anthropic
    from anthropic.types import Message
    from anthropic.types.tool_use_block import ToolUseBlock
    import httpx
except ImportError:
    raise ImportError(
        'Anthropic native provider not available, to install: uv add "crewai[anthropic]"'
    ) from None


load_dotenv()


class AnthropicCompletion(BaseLLM):
    """Anthropic native completion implementation.

    This class provides direct integration with the Anthropic Python SDK,
    offering native tool use, streaming support, and proper message formatting.

    Attributes:
        model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022')
        base_url: Custom base URL for Anthropic API
        timeout: Request timeout in seconds
        max_retries: Maximum number of retries
        max_tokens: Maximum tokens in response (required for Anthropic)
        top_p: Nucleus sampling parameter
        stream: Enable streaming responses
        client_params: Additional parameters for the Anthropic client
        interceptor: HTTP interceptor for modifying requests/responses at transport level
    """

    def __init__(
        self,
        model: str = "claude-3-5-sonnet-20241022",
        api_key: str | None = None,
        base_url: str | None = None,
        timeout: float | None = None,
        max_retries: int = 2,
        temperature: float | None = None,
        max_tokens: int = 4096,  # Required for Anthropic
        top_p: float | None = None,
        stop_sequences: list[str] | None = None,
        stream: bool = False,
        client_params: dict[str, Any] | None = None,
        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
        **kwargs: Any,
    ):
        """Initialize Anthropic chat completion client.
    base_url: str | None = Field(
        default=None, description="Custom base URL for Anthropic API"
    )
    timeout: float | None = Field(
        default=None, description="Request timeout in seconds"
    )
    max_retries: int = Field(default=2, description="Maximum number of retries")
    max_tokens: int = Field(
        default=4096, description="Maximum tokens in response (required for Anthropic)"
    )
    top_p: float | None = Field(default=None, description="Nucleus sampling parameter")
    stream: bool = Field(default=False, description="Enable streaming responses")
    client_params: dict[str, Any] | None = Field(
        default_factory=dict, description="Additional Anthropic client parameters"
    )
    _client: Anthropic = PrivateAttr(default=None)  # type: ignore[assignment]

        Args:
            model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022')
            api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var)
            base_url: Custom base URL for Anthropic API
            timeout: Request timeout in seconds
            max_retries: Maximum number of retries
            temperature: Sampling temperature (0-1)
            max_tokens: Maximum tokens in response (required for Anthropic)
            top_p: Nucleus sampling parameter
            stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop)
            stream: Enable streaming responses
            client_params: Additional parameters for the Anthropic client
            interceptor: HTTP interceptor for modifying requests/responses at transport level.
            **kwargs: Additional parameters
        """
        super().__init__(
            model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
        )
    _is_claude_3: bool = PrivateAttr(default=False)
    _supports_tools: bool = PrivateAttr(default=False)

        # Client params
        self.interceptor = interceptor
        self.client_params = client_params
        self.base_url = base_url
        self.timeout = timeout
        self.max_retries = max_retries
    @model_validator(mode="after")
    def setup_client(self) -> Self:
        """Initialize the Anthropic client and model-specific settings."""
        self._client = Anthropic(**self._get_client_params())

        self.client = Anthropic(**self._get_client_params())
        self._is_claude_3 = "claude-3" in self.model.lower()
        self._supports_tools = self._is_claude_3

        # Store completion parameters
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.stream = stream
        self.stop_sequences = stop_sequences or []

        # Model-specific settings
        self.is_claude_3 = "claude-3" in model.lower()
        self.supports_tools = self.is_claude_3  # Claude 3+ supports tool use
        return self

    @property
    def stop(self) -> list[str]:
        """Get stop sequences sent to the API."""
        return self.stop_sequences
    def is_claude_3(self) -> bool:
        """Check if model is Claude 3."""
        return self._is_claude_3

    @stop.setter
    def stop(self, value: list[str] | str | None) -> None:
        """Set stop sequences.

        Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
        are properly sent to the Anthropic API.

        Args:
            value: Stop sequences as a list, single string, or None
        """
        if value is None:
            self.stop_sequences = []
        elif isinstance(value, str):
            self.stop_sequences = [value]
        elif isinstance(value, list):
            self.stop_sequences = value
        else:
            self.stop_sequences = []
    @property
    def supports_tools(self) -> bool:
        """Check if model supports tools."""
        return self._supports_tools

    def _get_client_params(self) -> dict[str, Any]:
        """Get client parameters."""

        if self.api_key is None:
            self.api_key = os.getenv("ANTHROPIC_API_KEY")
        if self.api_key is None:
            raise ValueError("ANTHROPIC_API_KEY is required")

        client_params = {
            "api_key": self.api_key,
            "base_url": self.base_url,
@@ -149,8 +124,8 @@ class AnthropicCompletion(BaseLLM):
        tools: list[dict[str, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call Anthropic messages API.
@@ -245,8 +220,8 @@ class AnthropicCompletion(BaseLLM):
            params["temperature"] = self.temperature
        if self.top_p is not None:
            params["top_p"] = self.top_p
        if self.stop_sequences:
            params["stop_sequences"] = self.stop_sequences
        if self.stop:
            params["stop_sequences"] = self.stop

        # Handle tools for Claude 3+
        if tools and self.supports_tools:
@@ -266,8 +241,6 @@ class AnthropicCompletion(BaseLLM):
                continue

            try:
                from crewai.llms.providers.utils.common import safe_tool_conversion

                name, description, parameters = safe_tool_conversion(tool, "Anthropic")
            except (ImportError, KeyError, ValueError) as e:
                logging.error(f"Error converting tool to Anthropic format: {e}")
@@ -341,8 +314,8 @@ class AnthropicCompletion(BaseLLM):
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming message completion."""
@@ -357,7 +330,7 @@ class AnthropicCompletion(BaseLLM):
            params["tool_choice"] = {"type": "tool", "name": "structured_output"}

        try:
            response: Message = self.client.messages.create(**params)
            response: Message = self._client.messages.create(**params)

        except Exception as e:
            if is_context_length_exceeded(e):
@@ -429,8 +402,8 @@ class AnthropicCompletion(BaseLLM):
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str:
        """Handle streaming message completion."""
@@ -451,7 +424,7 @@ class AnthropicCompletion(BaseLLM):
        stream_params = {k: v for k, v in params.items() if k != "stream"}

        # Make streaming API call
        with self.client.messages.stream(**stream_params) as stream:
        with self._client.messages.stream(**stream_params) as stream:
            for event in stream:
                if hasattr(event, "delta") and hasattr(event.delta, "text"):
                    text_delta = event.delta.text
@@ -525,8 +498,8 @@ class AnthropicCompletion(BaseLLM):
        tool_uses: list[ToolUseBlock],
        params: dict[str, Any],
        available_functions: dict[str, Any],
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> str:
        """Handle the complete tool use conversation flow.

@@ -579,7 +552,7 @@ class AnthropicCompletion(BaseLLM):

        try:
            # Send tool results back to Claude for final response
            final_response: Message = self.client.messages.create(**follow_up_params)
            final_response: Message = self._client.messages.create(**follow_up_params)

            # Track token usage for follow-up call
            follow_up_usage = self._extract_anthropic_token_usage(final_response)
@@ -636,7 +609,6 @@ class AnthropicCompletion(BaseLLM):

    def get_context_window_size(self) -> int:
        """Get the context window size for the model."""
        from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO

        # Context window sizes for Anthropic models
        context_windows = {
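One recurring change in this diff is turning a plain `stop` attribute into a property pair that keeps `stop_sequences` in sync. Reduced to a standalone class (not the provider code itself), the pattern is:

```python
class StopSync:
    """Standalone illustration of the stop/stop_sequences property pattern."""

    def __init__(self) -> None:
        self.stop_sequences: list[str] = []

    @property
    def stop(self) -> list[str]:
        return self.stop_sequences

    @stop.setter
    def stop(self, value: list[str] | str | None) -> None:
        if value is None:
            self.stop_sequences = []
        elif isinstance(value, str):
            self.stop_sequences = [value]
        else:
            self.stop_sequences = list(value)


s = StopSync()
s.stop = "Observation:"  # the executor may assign a single string
assert s.stop_sequences == ["Observation:"]  # the API payload sees a list
```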
@@ -5,8 +5,12 @@ import logging
import os
from typing import TYPE_CHECKING, Any

from pydantic import BaseModel
from dotenv import load_dotenv
from pydantic import BaseModel, Field, PrivateAttr, model_validator
from typing_extensions import Self

from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededError,
@@ -15,7 +19,8 @@ from crewai.utilities.types import LLMMessage


if TYPE_CHECKING:
    from crewai.llms.hooks.base import BaseInterceptor
    from crewai.agent.core import Agent
    from crewai.task import Task
    from crewai.tools.base_tool import BaseTool


@@ -36,7 +41,7 @@ try:
    )

    from crewai.events.types.llm_events import LLMCallType
    from crewai.llms.base_llm import BaseLLM
    from crewai.llm.base_llm import BaseLLM

except ImportError:
    raise ImportError(
@@ -44,111 +49,109 @@ except ImportError:
    ) from None


load_dotenv()


class AzureCompletion(BaseLLM):
    """Azure AI Inference native completion implementation.

    This class provides direct integration with the Azure AI Inference Python SDK,
    offering native function calling, streaming support, and proper Azure authentication.

    Attributes:
        model: Azure deployment name or model name
        endpoint: Azure endpoint URL
        api_version: Azure API version
        timeout: Request timeout in seconds
        max_retries: Maximum number of retries
        top_p: Nucleus sampling parameter
        frequency_penalty: Frequency penalty (-2 to 2)
        presence_penalty: Presence penalty (-2 to 2)
        max_tokens: Maximum tokens in response
        stream: Enable streaming responses
        interceptor: HTTP interceptor (not yet supported for Azure)
    """

    def __init__(
        self,
        model: str,
        api_key: str | None = None,
        endpoint: str | None = None,
        api_version: str | None = None,
        timeout: float | None = None,
        max_retries: int = 2,
        temperature: float | None = None,
        top_p: float | None = None,
        frequency_penalty: float | None = None,
        presence_penalty: float | None = None,
        max_tokens: int | None = None,
        stop: list[str] | None = None,
        stream: bool = False,
        interceptor: BaseInterceptor[Any, Any] | None = None,
        **kwargs: Any,
    ):
        """Initialize Azure AI Inference chat completion client.
    endpoint: str = Field(  # type: ignore[assignment]
        default_factory=lambda: os.getenv("AZURE_ENDPOINT")
        or os.getenv("AZURE_OPENAI_ENDPOINT")
        or os.getenv("AZURE_API_BASE"),
        description="Azure endpoint URL (defaults to AZURE_ENDPOINT env var)",
    )
    api_version: str = Field(
        default_factory=lambda: os.getenv("AZURE_API_VERSION", "2024-06-01"),
        description="Azure API version (defaults to AZURE_API_VERSION env var or 2024-06-01)",
    )
    timeout: float | None = Field(
        default=None, description="Request timeout in seconds"
    )
    max_retries: int = Field(default=2, description="Maximum number of retries")
    top_p: float | None = Field(default=None, description="Nucleus sampling parameter")
    frequency_penalty: float | None = Field(
        default=None, le=2.0, ge=-2.0, description="Frequency penalty (-2 to 2)"
    )
    presence_penalty: float | None = Field(
        default=None, le=2.0, ge=-2.0, description="Presence penalty (-2 to 2)"
    )
    max_tokens: int | None = Field(
        default=None, description="Maximum tokens in response"
    )
    stream: bool = Field(default=False, description="Enable streaming responses")
    _client: ChatCompletionsClient = PrivateAttr(default=None)  # type: ignore[assignment]

        Args:
            model: Azure deployment name or model name
            api_key: Azure API key (defaults to AZURE_API_KEY env var)
            endpoint: Azure endpoint URL (defaults to AZURE_ENDPOINT env var)
            api_version: Azure API version (defaults to AZURE_API_VERSION env var)
            timeout: Request timeout in seconds
            max_retries: Maximum number of retries
            temperature: Sampling temperature (0-2)
            top_p: Nucleus sampling parameter
            frequency_penalty: Frequency penalty (-2 to 2)
            presence_penalty: Presence penalty (-2 to 2)
            max_tokens: Maximum tokens in response
            stop: Stop sequences
            stream: Enable streaming responses
            interceptor: HTTP interceptor (not yet supported for Azure).
            **kwargs: Additional parameters
        """
        if interceptor is not None:
    _is_openai_model: bool = PrivateAttr(default=False)
    _is_azure_openai_endpoint: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def setup_client(self) -> Self:
        """Initialize the Azure client and validate configuration."""
        if self.interceptor is not None:
            raise NotImplementedError(
                "HTTP interceptors are not yet supported for Azure AI Inference provider. "
                "Interceptors are currently supported for OpenAI and Anthropic providers only."
            )

        super().__init__(
            model=model, temperature=temperature, stop=stop or [], **kwargs
        )

        self.api_key = api_key or os.getenv("AZURE_API_KEY")
        self.endpoint = (
            endpoint
            or os.getenv("AZURE_ENDPOINT")
            or os.getenv("AZURE_OPENAI_ENDPOINT")
            or os.getenv("AZURE_API_BASE")
        )
        self.api_version = api_version or os.getenv("AZURE_API_VERSION") or "2024-06-01"
        self.timeout = timeout
        self.max_retries = max_retries
        if not self.api_key:
            self.api_key = os.getenv("AZURE_API_KEY")

        if not self.api_key:
            raise ValueError(
                "Azure API key is required. Set AZURE_API_KEY environment variable or pass api_key parameter."
            )
        if not self.endpoint:
            raise ValueError(
                "Azure endpoint is required. Set AZURE_ENDPOINT environment variable or pass endpoint parameter."
            )

        # Validate and potentially fix Azure OpenAI endpoint URL
        self.endpoint = self._validate_and_fix_endpoint(self.endpoint, model)
        self.endpoint = self._validate_and_fix_endpoint(self.endpoint, self.model)

        # Build client kwargs
        client_kwargs = {
        client_kwargs: dict[str, Any] = {
            "endpoint": self.endpoint,
            "credential": AzureKeyCredential(self.api_key),
        }

        # Add api_version if specified (primarily for Azure OpenAI endpoints)
        if self.api_version:
            client_kwargs["api_version"] = self.api_version

        self.client = ChatCompletionsClient(**client_kwargs)  # type: ignore[arg-type]
        self._client = ChatCompletionsClient(**client_kwargs)

        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        self.max_tokens = max_tokens
        self.stream = stream

        self.is_openai_model = any(
            prefix in model.lower() for prefix in ["gpt-", "o1-", "text-"]
        self._is_openai_model = any(
            prefix in self.model.lower() for prefix in ["gpt-", "o1-", "text-"]
        )

        self.is_azure_openai_endpoint = (
        self._is_azure_openai_endpoint = (
            "openai.azure.com" in self.endpoint
            and "/openai/deployments/" in self.endpoint
        )

    def _validate_and_fix_endpoint(self, endpoint: str, model: str) -> str:
        return self

    @property
    def is_openai_model(self) -> bool:
        """Check if model is an OpenAI model."""
        return self._is_openai_model

    @property
    def is_azure_openai_endpoint(self) -> bool:
        """Check if endpoint is an Azure OpenAI endpoint."""
        return self._is_azure_openai_endpoint

    def _validate_and_fix_endpoint(self, endpoint: str | None, model: str) -> str:
        """Validate and fix Azure endpoint URL format.

        Azure OpenAI endpoints should be in the format:
@@ -160,7 +163,15 @@ class AzureCompletion(BaseLLM):

        Returns:
            Validated and potentially corrected endpoint URL

        Raises:
            ValueError: If endpoint is None or empty
        """
        if not endpoint:
            raise ValueError(
                "Azure endpoint is required. Set AZURE_ENDPOINT environment variable or pass endpoint parameter."
            )

        if "openai.azure.com" in endpoint and "/openai/deployments/" not in endpoint:
            endpoint = endpoint.rstrip("/")

@@ -177,8 +188,8 @@ class AzureCompletion(BaseLLM):
        tools: list[dict[str, BaseTool]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call Azure AI Inference chat completions API.
@@ -317,8 +328,6 @@ class AzureCompletion(BaseLLM):
    ) -> list[dict[str, Any]]:
        """Convert CrewAI tool format to Azure OpenAI function calling format."""

        from crewai.llms.providers.utils.common import safe_tool_conversion

        azure_tools = []

        for tool in tools:
@@ -371,14 +380,14 @@ class AzureCompletion(BaseLLM):
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming chat completion."""
        # Make API call
        try:
            response: ChatCompletions = self.client.complete(**params)
            response: ChatCompletions = self._client.complete(**params)

            if not response.choices:
                raise ValueError("No choices returned from Azure API")
@@ -467,8 +476,8 @@ class AzureCompletion(BaseLLM):
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str:
        """Handle streaming chat completion."""
@@ -476,7 +485,7 @@ class AzureCompletion(BaseLLM):
        tool_calls = {}

        # Make streaming API call
        for update in self.client.complete(**params):
        for update in self._client.complete(**params):
            if isinstance(update, StreamingChatCompletionsUpdate):
                if update.choices:
                    choice = update.choices[0]
@@ -554,7 +563,6 @@ class AzureCompletion(BaseLLM):

    def get_context_window_size(self) -> int:
        """Get the context window size for the model."""
        from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES

        min_context = 1024
        max_context = 2097152
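For Azure, the validator resolves configuration from the environment when arguments are omitted. A hedged sketch of typical use, with placeholder values; the import path follows `_get_native_provider` above, and the env var names mirror the diff:

```python
import os

os.environ["AZURE_API_KEY"] = "<key>"  # placeholder
os.environ["AZURE_ENDPOINT"] = (
    "https://my-resource.openai.azure.com/openai/deployments/my-deployment"
)

from crewai.llm.providers.azure.completion import AzureCompletion

# api_version falls back to AZURE_API_VERSION, then "2024-06-01".
llm = AzureCompletion(model="my-deployment")
```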
@@ -5,11 +5,15 @@ import logging
import os
from typing import TYPE_CHECKING, Any, TypedDict, cast

from pydantic import BaseModel
from typing_extensions import Required
from dotenv import load_dotenv
from mypy_boto3_bedrock_runtime.client import BedrockRuntimeClient
from pydantic import BaseModel, Field, PrivateAttr, model_validator
from typing_extensions import Required, Self

from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededError,
@@ -30,7 +34,8 @@ if TYPE_CHECKING:
        ToolTypeDef,
    )

    from crewai.llms.hooks.base import BaseInterceptor
    from crewai.agent.core import Agent
    from crewai.task import Task


try:
@@ -72,6 +77,9 @@ else:
        topK: int


load_dotenv()


class ToolInputSchema(TypedDict):
    """Type definition for tool input schema in Converse API."""

@@ -141,74 +149,84 @@ class BedrockCompletion(BaseLLM):
    - Complete streaming event handling (messageStart, contentBlockStart, etc.)
    - Response metadata and trace information capture
    - Model-specific conversation format handling (e.g., Cohere requirements)

    Attributes:
        model: The Bedrock model ID to use
        aws_access_key_id: AWS access key (defaults to environment variable)
        aws_secret_access_key: AWS secret key (defaults to environment variable)
        aws_session_token: AWS session token for temporary credentials
        region_name: AWS region name
        max_tokens: Maximum tokens to generate
        top_p: Nucleus sampling parameter
        top_k: Top-k sampling parameter (Claude models only)
        stream: Whether to use streaming responses
        guardrail_config: Guardrail configuration for content filtering
        additional_model_request_fields: Model-specific request parameters
        additional_model_response_field_paths: Custom response field paths
        interceptor: HTTP interceptor (not yet supported for Bedrock)
    """

    def __init__(
        self,
        model: str = "anthropic.claude-3-5-sonnet-20241022-v2:0",
        aws_access_key_id: str | None = None,
        aws_secret_access_key: str | None = None,
        aws_session_token: str | None = None,
        region_name: str = "us-east-1",
        temperature: float | None = None,
        max_tokens: int | None = None,
        top_p: float | None = None,
        top_k: int | None = None,
        stop_sequences: Sequence[str] | None = None,
        stream: bool = False,
        guardrail_config: dict[str, Any] | None = None,
        additional_model_request_fields: dict[str, Any] | None = None,
        additional_model_response_field_paths: list[str] | None = None,
        interceptor: BaseInterceptor[Any, Any] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize AWS Bedrock completion client.
    aws_access_key_id: str = Field(  # type: ignore[assignment]
        default_factory=lambda: os.getenv("AWS_ACCESS_KEY_ID"),
        description="AWS access key (defaults to environment variable)",
    )
    aws_secret_access_key: str = Field(  # type: ignore[assignment]
        default_factory=lambda: os.getenv("AWS_SECRET_ACCESS_KEY"),
        description="AWS secret key (defaults to environment variable)",
    )
    aws_session_token: str = Field(  # type: ignore[assignment]
        default_factory=lambda: os.getenv("AWS_SESSION_TOKEN"),
        description="AWS session token for temporary credentials",
    )
    region_name: str = Field(
        default_factory=lambda: os.getenv("AWS_REGION", "us-east-1"),
        description="AWS region name",
    )
    max_tokens: int | None = Field(
        default=None, description="Maximum tokens to generate"
    )
    top_p: float | None = Field(default=None, description="Nucleus sampling parameter")
    top_k: int | None = Field(
        default=None, description="Top-k sampling parameter (Claude models only)"
    )
    stream: bool = Field(
        default=False, description="Whether to use streaming responses"
    )
    guardrail_config: dict[str, Any] = Field(
        default_factory=dict,
        description="Guardrail configuration for content filtering",
    )
    additional_model_request_fields: dict[str, Any] = Field(
        default_factory=dict, description="Model-specific request parameters"
    )
    additional_model_response_field_paths: list[str] = Field(
        default_factory=list, description="Custom response field paths"
    )
    _client: BedrockRuntimeClient = PrivateAttr(  # type: ignore[assignment]
        default_factory=lambda: Session().client,
    )

        Args:
            model: The Bedrock model ID to use
            aws_access_key_id: AWS access key (defaults to environment variable)
            aws_secret_access_key: AWS secret key (defaults to environment variable)
            aws_session_token: AWS session token for temporary credentials
            region_name: AWS region name
            temperature: Sampling temperature for response generation
            max_tokens: Maximum tokens to generate
            top_p: Nucleus sampling parameter
            top_k: Top-k sampling parameter (Claude models only)
            stop_sequences: List of sequences that stop generation
            stream: Whether to use streaming responses
            guardrail_config: Guardrail configuration for content filtering
            additional_model_request_fields: Model-specific request parameters
            additional_model_response_field_paths: Custom response field paths
            interceptor: HTTP interceptor (not yet supported for Bedrock).
            **kwargs: Additional parameters
        """
        if interceptor is not None:
    _is_claude_model: bool = PrivateAttr(default=False)
    _supports_tools: bool = PrivateAttr(default=True)
    _supports_streaming: bool = PrivateAttr(default=True)
    _model_id: str = PrivateAttr()

    @model_validator(mode="after")
    def setup_client(self) -> Self:
        """Initialize the Bedrock client and validate configuration."""
        if self.interceptor is not None:
            raise NotImplementedError(
                "HTTP interceptors are not yet supported for AWS Bedrock provider. "
                "Interceptors are currently supported for OpenAI and Anthropic providers only."
            )

        # Extract provider from kwargs to avoid duplicate argument
        kwargs.pop("provider", None)

        super().__init__(
            model=model,
            temperature=temperature,
            stop=stop_sequences or [],
            provider="bedrock",
            **kwargs,
        )

        # Initialize Bedrock client with proper configuration
        session = Session(
            aws_access_key_id=aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID"),
            aws_secret_access_key=aws_secret_access_key
            or os.getenv("AWS_SECRET_ACCESS_KEY"),
            aws_session_token=aws_session_token or os.getenv("AWS_SESSION_TOKEN"),
            region_name=region_name,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
            aws_session_token=self.aws_session_token,
            region_name=self.region_name,
        )

        # Configure client with timeouts and retries following AWS best practices
        config = Config(
            read_timeout=300,
            retries={
@@ -218,54 +236,34 @@ class BedrockCompletion(BaseLLM):
            tcp_keepalive=True,
        )

        self.client = session.client("bedrock-runtime", config=config)
        self.region_name = region_name
        self._client = session.client("bedrock-runtime", config=config)

        # Store completion parameters
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.top_k = top_k
        self.stream = stream
        self.stop_sequences = stop_sequences or []
        self._is_claude_model = "claude" in self.model.lower()
        self._supports_tools = True
        self._supports_streaming = True
        self._model_id = self.model

        # Store advanced features (optional)
        self.guardrail_config = guardrail_config
        self.additional_model_request_fields = additional_model_request_fields
        self.additional_model_response_field_paths = (
            additional_model_response_field_paths
        )

        # Model-specific settings
        self.is_claude_model = "claude" in model.lower()
        self.supports_tools = True  # Converse API supports tools for most models
        self.supports_streaming = True

        # Handle inference profiles for newer models
        self.model_id = model
        return self

    @property
    def stop(self) -> list[str]:
        """Get stop sequences sent to the API."""
        return list(self.stop_sequences)
    def is_claude_model(self) -> bool:
        """Check if model is a Claude model."""
        return self._is_claude_model

    @stop.setter
    def stop(self, value: Sequence[str] | str | None) -> None:
        """Set stop sequences.
    @property
    def supports_tools(self) -> bool:
        """Check if model supports tools."""
        return self._supports_tools

        Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
        are properly sent to the Bedrock API.
    @property
    def supports_streaming(self) -> bool:
        """Check if model supports streaming."""
        return self._supports_streaming

        Args:
            value: Stop sequences as a Sequence, single string, or None
        """
        if value is None:
            self.stop_sequences = []
        elif isinstance(value, str):
            self.stop_sequences = [value]
        elif isinstance(value, Sequence):
            self.stop_sequences = list(value)
        else:
            self.stop_sequences = []
    @property
    def model_id(self) -> str:
        """Get the model ID."""
        return self._model_id

    def call(
        self,
@@ -273,8 +271,8 @@ class BedrockCompletion(BaseLLM):
        tools: list[dict[Any, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call AWS Bedrock Converse API."""
@@ -359,8 +357,8 @@ class BedrockCompletion(BaseLLM):
        messages: list[dict[str, Any]],
        body: BedrockConverseRequestBody,
        available_functions: Mapping[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> str:
        """Handle non-streaming converse API call following AWS best practices."""
        try:
@@ -378,7 +376,7 @@ class BedrockCompletion(BaseLLM):
                    raise ValueError(f"Invalid message format at index {i}")

            # Call Bedrock Converse API with proper error handling
            response = self.client.converse(
            response = self._client.converse(
                modelId=self.model_id,
                messages=cast(
                    "Sequence[MessageTypeDef | MessageOutputTypeDef]",
@@ -540,8 +538,8 @@ class BedrockCompletion(BaseLLM):
        messages: list[dict[str, Any]],
        body: BedrockConverseRequestBody,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> str:
        """Handle streaming converse API call with comprehensive event handling."""
        full_response = ""
@@ -549,7 +547,7 @@ class BedrockCompletion(BaseLLM):
        tool_use_id = None

        try:
            response = self.client.converse_stream(
            response = self._client.converse_stream(
                modelId=self.model_id,
                messages=cast(
                    "Sequence[MessageTypeDef | MessageOutputTypeDef]",
@@ -778,7 +776,6 @@ class BedrockCompletion(BaseLLM):
        tools: list[dict[str, Any]],
    ) -> list[ConverseToolTypeDef]:
        """Convert CrewAI tools to Converse API format following AWS specification."""
        from crewai.llms.providers.utils.common import safe_tool_conversion

        converse_tools: list[ConverseToolTypeDef] = []

@@ -818,8 +815,8 @@ class BedrockCompletion(BaseLLM):
            config["temperature"] = float(self.temperature)
        if self.top_p is not None:
            config["topP"] = float(self.top_p)
        if self.stop_sequences:
            config["stopSequences"] = self.stop_sequences
        if self.stop:
            config["stopSequences"] = self.stop

        if self.is_claude_model and self.top_k is not None:
            # top_k is supported by Claude models
@@ -871,7 +868,6 @@ class BedrockCompletion(BaseLLM):

    def get_context_window_size(self) -> int:
        """Get the context window size for the model."""
        from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO

        # Context window sizes for common Bedrock models
        context_windows = {
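The Bedrock validator builds its boto3 session and client config explicitly. A sketch of that wiring in isolation; the retry settings are assumptions, since the diff truncates the `retries` block:

```python
from boto3.session import Session
from botocore.config import Config

session = Session(region_name="us-east-1")  # credentials come from the env
config = Config(
    read_timeout=300,
    retries={"max_attempts": 3, "mode": "adaptive"},  # assumed values
    tcp_keepalive=True,
)
client = session.client("bedrock-runtime", config=config)
```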
@@ -1,12 +1,17 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Any, cast
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from pydantic import BaseModel
|
||||
from dotenv import load_dotenv
|
||||
from pydantic import BaseModel, Field, PrivateAttr, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
from crewai.llm.base_llm import BaseLLM
|
||||
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
|
||||
from crewai.llm.providers.utils.common import safe_tool_conversion
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
@@ -14,6 +19,11 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent.core import Agent
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
try:
|
||||
from google import genai # type: ignore[import-untyped]
|
||||
from google.genai import types # type: ignore[import-untyped]
|
||||
@@ -24,111 +34,93 @@ except ImportError:
|
||||
) from None
|
||||
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
class GeminiCompletion(BaseLLM):
|
||||
"""Google Gemini native completion implementation.
|
||||
|
||||
This class provides direct integration with the Google Gen AI Python SDK,
|
||||
offering native function calling, streaming support, and proper Gemini formatting.
|
||||
|
||||
Attributes:
|
||||
model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
|
||||
project: Google Cloud project ID (for Vertex AI)
|
||||
location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
|
||||
top_p: Nucleus sampling parameter
|
||||
top_k: Top-k sampling parameter
|
||||
max_output_tokens: Maximum tokens in response
|
||||
stream: Enable streaming responses
|
||||
safety_settings: Safety filter settings
|
||||
client_params: Additional parameters for Google Gen AI Client constructor
|
||||
interceptor: HTTP interceptor (not yet supported for Gemini)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str = "gemini-2.0-flash-001",
|
||||
api_key: str | None = None,
|
||||
project: str | None = None,
|
||||
location: str | None = None,
|
||||
temperature: float | None = None,
|
||||
top_p: float | None = None,
|
||||
top_k: int | None = None,
|
||||
max_output_tokens: int | None = None,
|
||||
stop_sequences: list[str] | None = None,
|
||||
stream: bool = False,
|
||||
safety_settings: dict[str, Any] | None = None,
|
||||
client_params: dict[str, Any] | None = None,
|
||||
interceptor: BaseInterceptor[Any, Any] | None = None,
|
||||
**kwargs: Any,
|
||||
):
|
||||
"""Initialize Google Gemini chat completion client.
|
||||
project: str | None = Field(
|
||||
default_factory=lambda: os.getenv("GOOGLE_CLOUD_PROJECT"),
|
||||
description="Google Cloud project ID (for Vertex AI)",
|
||||
)
|
||||
location: str = Field(
|
||||
default_factory=lambda: os.getenv("GOOGLE_CLOUD_LOCATION", "us-central1"),
|
||||
description="Google Cloud location (for Vertex AI, defaults to 'us-central1')",
|
||||
)
|
||||
top_p: float | None = Field(default=None, description="Nucleus sampling parameter")
|
||||
top_k: int | None = Field(default=None, description="Top-k sampling parameter")
|
||||
max_output_tokens: int | None = Field(
|
||||
default=None, description="Maximum tokens in response"
|
||||
)
|
||||
stream: bool = Field(default=False, description="Enable streaming responses")
|
||||
safety_settings: dict[str, Any] = Field(
|
        default_factory=dict, description="Safety filter settings"
    )
    client_params: dict[str, Any] = Field(
        default_factory=dict,
        description="Additional parameters for Google Gen AI Client constructor",
    )
    _client: Any = PrivateAttr(default=None)

        Args:
            model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
            api_key: Google API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var)
            project: Google Cloud project ID (for Vertex AI)
            location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
            temperature: Sampling temperature (0-2)
            top_p: Nucleus sampling parameter
            top_k: Top-k sampling parameter
            max_output_tokens: Maximum tokens in response
            stop_sequences: Stop sequences
            stream: Enable streaming responses
            safety_settings: Safety filter settings
            client_params: Additional parameters to pass to the Google Gen AI Client constructor.
                Supports parameters like http_options, credentials, debug_config, etc.
            interceptor: HTTP interceptor (not yet supported for Gemini).
            **kwargs: Additional parameters
        """
        if interceptor is not None:
    _is_gemini_2: bool = PrivateAttr(default=False)
    _is_gemini_1_5: bool = PrivateAttr(default=False)
    _supports_tools: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def setup_client(self) -> Self:
        """Initialize the Gemini client and validate configuration."""
        if self.interceptor is not None:
            raise NotImplementedError(
                "HTTP interceptors are not yet supported for Google Gemini provider. "
                "Interceptors are currently supported for OpenAI and Anthropic providers only."
            )

        super().__init__(
            model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
        )

        # Store client params for later use
        self.client_params = client_params or {}

        # Get API configuration with environment variable fallbacks
        self.api_key = (
            api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
        )
        self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
        self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"
        if self.api_key is None:
            self.api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")

        use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"

        self.client = self._initialize_client(use_vertexai)
        self._client = self._initialize_client(use_vertexai)

        # Store completion parameters
        self.top_p = top_p
        self.top_k = top_k
        self.max_output_tokens = max_output_tokens
        self.stream = stream
        self.safety_settings = safety_settings or {}
        self.stop_sequences = stop_sequences or []
        self._is_gemini_2 = "gemini-2" in self.model.lower()
        self._is_gemini_1_5 = "gemini-1.5" in self.model.lower()
        self._supports_tools = self._is_gemini_1_5 or self._is_gemini_2

        # Model-specific settings
        self.is_gemini_2 = "gemini-2" in model.lower()
        self.is_gemini_1_5 = "gemini-1.5" in model.lower()
        self.supports_tools = self.is_gemini_1_5 or self.is_gemini_2
        return self

    @property
    def stop(self) -> list[str]:
        """Get stop sequences sent to the API."""
        return self.stop_sequences
    def is_gemini_2(self) -> bool:
        """Check if model is Gemini 2."""
        return self._is_gemini_2

    @stop.setter
    def stop(self, value: list[str] | str | None) -> None:
        """Set stop sequences.
    @property
    def is_gemini_1_5(self) -> bool:
        """Check if model is Gemini 1.5."""
        return self._is_gemini_1_5

        Synchronizes stop_sequences to ensure values set by CrewAgentExecutor
        are properly sent to the Gemini API.
    @property
    def supports_tools(self) -> bool:
        """Check if model supports tools."""
        return self._supports_tools

        Args:
            value: Stop sequences as a list, single string, or None
        """
        if value is None:
            self.stop_sequences = []
        elif isinstance(value, str):
            self.stop_sequences = [value]
        elif isinstance(value, list):
            self.stop_sequences = value
        else:
            self.stop_sequences = []
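        # Illustrative sketch (not part of this diff): how the stop setter
        # above normalizes values; the instantiation arguments are assumed.
        # llm = GeminiCompletion(model="gemini-2.0-flash-001")
        # llm.stop = "Observation:"     # a string becomes ["Observation:"]
        # llm.stop = ["Final Answer:"]  # a list is stored as-is
        # llm.stop = None               # resets stop_sequences to []
        # assert llm.stop == llm.stop_sequences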

    def _initialize_client(self, use_vertexai: bool = False) -> genai.Client:  # type: ignore[no-any-unimported]
    def _initialize_client(self, use_vertexai: bool = False) -> Any:
        """Initialize the Google Gen AI client with proper parameter handling.

        Args:
@@ -150,12 +142,9 @@ class GeminiCompletion(BaseLLM):
                    "location": self.location,
                }
            )

            client_params.pop("api_key", None)

        elif self.api_key:
            client_params["api_key"] = self.api_key

            client_params.pop("vertexai", None)
            client_params.pop("project", None)
            client_params.pop("location", None)
@@ -180,11 +169,10 @@ class GeminiCompletion(BaseLLM):
        params = {}

        if (
            hasattr(self, "client")
            and hasattr(self.client, "vertexai")
            and self.client.vertexai
            hasattr(self, "_client")
            and hasattr(self._client, "vertexai")
            and self._client.vertexai
        ):
            # Vertex AI configuration
            params.update(
                {
                    "vertexai": True,
@@ -206,8 +194,8 @@ class GeminiCompletion(BaseLLM):
        tools: list[dict[str, Any]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call Google Gemini generate content API.
@@ -296,15 +284,12 @@ class GeminiCompletion(BaseLLM):
        self.tools = tools
        config_params = {}

        # Add system instruction if present
        if system_instruction:
            # Convert system instruction to Content format
            system_content = types.Content(
                role="user", parts=[types.Part.from_text(text=system_instruction)]
            )
            config_params["system_instruction"] = system_content

        # Add generation config parameters
        if self.temperature is not None:
            config_params["temperature"] = self.temperature
        if self.top_p is not None:
@@ -313,14 +298,13 @@ class GeminiCompletion(BaseLLM):
            config_params["top_k"] = self.top_k
        if self.max_output_tokens is not None:
            config_params["max_output_tokens"] = self.max_output_tokens
        if self.stop_sequences:
            config_params["stop_sequences"] = self.stop_sequences
        if self.stop:
            config_params["stop_sequences"] = self.stop

        if response_model:
            config_params["response_mime_type"] = "application/json"
            config_params["response_schema"] = response_model.model_json_schema()

        # Handle tools for supported models
        if tools and self.supports_tools:
            config_params["tools"] = self._convert_tools_for_interference(tools)

@@ -335,8 +319,6 @@ class GeminiCompletion(BaseLLM):
        """Convert CrewAI tool format to Gemini function declaration format."""
        gemini_tools = []

        from crewai.llms.providers.utils.common import safe_tool_conversion

        for tool in tools:
            name, description, parameters = safe_tool_conversion(tool, "Gemini")

@@ -345,7 +327,6 @@ class GeminiCompletion(BaseLLM):
                description=description,
            )

            # Add parameters if present - ensure parameters is a dict
            if parameters and isinstance(parameters, dict):
                function_declaration.parameters = parameters

@@ -381,16 +362,12 @@ class GeminiCompletion(BaseLLM):
            content = message.get("content", "")

            if role == "system":
                # Extract system instruction - Gemini handles it separately
                if system_instruction:
                    system_instruction += f"\n\n{content}"
                else:
                    system_instruction = cast(str, content)
            else:
                # Convert role for Gemini (assistant -> model)
                gemini_role = "model" if role == "assistant" else "user"

                # Create Content object
                gemini_content = types.Content(
                    role=gemini_role, parts=[types.Part.from_text(text=content)]
                )
@@ -404,8 +381,8 @@ class GeminiCompletion(BaseLLM):
        system_instruction: str | None,
        config: types.GenerateContentConfig,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming content generation."""
@@ -416,7 +393,7 @@ class GeminiCompletion(BaseLLM):
        }

        try:
            response = self.client.models.generate_content(**api_params)
            response = self._client.models.generate_content(**api_params)

            usage = self._extract_token_usage(response)
        except Exception as e:
@@ -470,8 +447,8 @@ class GeminiCompletion(BaseLLM):
        contents: list[types.Content],
        config: types.GenerateContentConfig,
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str:
        """Handle streaming content generation."""
@@ -484,7 +461,7 @@ class GeminiCompletion(BaseLLM):
            "config": config,
        }

        for chunk in self.client.models.generate_content_stream(**api_params):
        for chunk in self._client.models.generate_content_stream(**api_params):
            if hasattr(chunk, "text") and chunk.text:
                full_response += chunk.text
                self._emit_stream_chunk_event(
@@ -507,13 +484,11 @@ class GeminiCompletion(BaseLLM):
                else {},
            }

        # Handle completed function calls
        if function_calls and available_functions:
            for call_data in function_calls.values():
                function_name = call_data["name"]
                function_args = call_data["args"]

                # Execute tool
                result = self._handle_tool_execution(
                    function_name=function_name,
                    function_args=function_args,
@@ -547,7 +522,6 @@ class GeminiCompletion(BaseLLM):

    def get_context_window_size(self) -> int:
        """Get the context window size for the model."""
        from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES

        min_context = 1024
        max_context = 2097152
@@ -574,13 +548,11 @@ class GeminiCompletion(BaseLLM):
            "gemma-3-27b": 128000,
        }

        # Find the best match for the model name
        for model_prefix, size in context_windows.items():
            if self.model.startswith(model_prefix):
                return int(size * CONTEXT_WINDOW_USAGE_RATIO)

        # Default context window size for Gemini models
        return int(1048576 * CONTEXT_WINDOW_USAGE_RATIO)  # 1M tokens
        return int(1048576 * CONTEXT_WINDOW_USAGE_RATIO)
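        # Illustrative sketch (not part of this diff): the prefix lookup above,
        # worked through. The 0.75 ratio is an assumption for illustration, not
        # a value stated in this diff.
        # ratio = 0.75
        # int(128000 * ratio)   # "gemma-3-27b"  -> 96000 usable tokens
        # int(1048576 * ratio)  # default (~1M)  -> 786432 usable tokens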

    def _extract_token_usage(self, response: dict[str, Any]) -> dict[str, Any]:
        """Extract token usage from Gemini response."""
@@ -6,16 +6,20 @@ import logging
import os
from typing import TYPE_CHECKING, Any

from dotenv import load_dotenv
import httpx
from openai import APIConnectionError, NotFoundError, OpenAI
from openai.types.chat import ChatCompletion, ChatCompletionChunk
from openai.types.chat.chat_completion import Choice
from openai.types.chat.chat_completion_chunk import ChoiceDelta
from pydantic import BaseModel
from pydantic import BaseModel, Field, PrivateAttr, model_validator
from typing_extensions import Self

from crewai.events.types.llm_events import LLMCallType
from crewai.llms.base_llm import BaseLLM
from crewai.llms.hooks.transport import HTTPTransport
from crewai.llm.base_llm import BaseLLM
from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES
from crewai.llm.hooks.transport import HTTPTransport
from crewai.llm.providers.utils.common import safe_tool_conversion
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededError,
@@ -25,11 +29,13 @@ from crewai.utilities.types import LLMMessage

if TYPE_CHECKING:
    from crewai.agent.core import Agent
    from crewai.llms.hooks.base import BaseInterceptor
    from crewai.task import Task
    from crewai.tools.base_tool import BaseTool


load_dotenv()


class OpenAICompletion(BaseLLM):
    """OpenAI native completion implementation.

@@ -37,60 +43,56 @@ class OpenAICompletion(BaseLLM):
    offering native structured outputs, function calling, and streaming support.
    """

    def __init__(
        self,
        model: str = "gpt-4o",
        api_key: str | None = None,
        base_url: str | None = None,
        organization: str | None = None,
        project: str | None = None,
        timeout: float | None = None,
        max_retries: int = 2,
        default_headers: dict[str, str] | None = None,
        default_query: dict[str, Any] | None = None,
        client_params: dict[str, Any] | None = None,
        temperature: float | None = None,
        top_p: float | None = None,
        frequency_penalty: float | None = None,
        presence_penalty: float | None = None,
        max_tokens: int | None = None,
        max_completion_tokens: int | None = None,
        seed: int | None = None,
        stream: bool = False,
        response_format: dict[str, Any] | type[BaseModel] | None = None,
        logprobs: bool | None = None,
        top_logprobs: int | None = None,
        reasoning_effort: str | None = None,
        provider: str | None = None,
        interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize OpenAI chat completion client."""
    # Client configuration fields
    organization: str | None = Field(default=None, description="OpenAI organization ID")
    project: str | None = Field(default=None, description="OpenAI project ID")
    max_retries: int = Field(default=2, description="Maximum number of retries")
    default_headers: dict[str, str] = Field(
        default_factory=dict, description="Default headers for requests"
    )
    default_query: dict[str, Any] = Field(
        default_factory=dict, description="Default query parameters"
    )
    client_params: dict[str, Any] = Field(
        default_factory=dict, description="Additional client parameters"
    )
    timeout: float | None = Field(default=None, description="Request timeout")
    api_base: str | None = Field(
        default=None, description="API base URL", deprecated=True
    )

        if provider is None:
            provider = kwargs.pop("provider", "openai")
    # Completion parameters
    top_p: float | None = Field(default=None, description="Top-p sampling parameter")
    frequency_penalty: float | None = Field(
        default=None, description="Frequency penalty"
    )
    presence_penalty: float | None = Field(default=None, description="Presence penalty")
    max_tokens: int | None = Field(default=None, description="Maximum tokens")
    max_completion_tokens: int | None = Field(
        None, description="Maximum completion tokens"
    )
    seed: int | None = Field(default=None, description="Random seed")
    stream: bool = Field(default=False, description="Enable streaming")
    response_format: dict[str, Any] | type[BaseModel] | None = Field(
        default=None, description="Response format"
    )
    logprobs: bool | None = Field(default=None, description="Return log probabilities")
    top_logprobs: int | None = Field(
        default=None, description="Number of top log probabilities"
    )
    reasoning_effort: str | None = Field(
        default=None, description="Reasoning effort level"
    )

        self.interceptor = interceptor
        # Client configuration attributes
        self.organization = organization
        self.project = project
        self.max_retries = max_retries
        self.default_headers = default_headers
        self.default_query = default_query
        self.client_params = client_params
        self.timeout = timeout
        self.base_url = base_url
        self.api_base = kwargs.pop("api_base", None)
    _client: OpenAI = PrivateAttr(default=None)  # type: ignore[assignment]
    is_o1_model: bool = Field(default=False, description="Whether this is an O1 model")
    is_gpt4_model: bool = Field(
        default=False, description="Whether this is a GPT-4 model"
    )

        super().__init__(
            model=model,
            temperature=temperature,
            api_key=api_key or os.getenv("OPENAI_API_KEY"),
            base_url=base_url,
            timeout=timeout,
            provider=provider,
            **kwargs,
        )
    @model_validator(mode="after")
    def setup_client(self) -> Self:
        """Initialize OpenAI client after model validation."""

        client_config = self._get_client_params()
        if self.interceptor:
@@ -98,31 +100,15 @@ class OpenAICompletion(BaseLLM):
            http_client = httpx.Client(transport=transport)
            client_config["http_client"] = http_client

        self.client = OpenAI(**client_config)
        self._client = OpenAI(**client_config)

        # Completion parameters
        self.top_p = top_p
        self.frequency_penalty = frequency_penalty
        self.presence_penalty = presence_penalty
        self.max_tokens = max_tokens
        self.max_completion_tokens = max_completion_tokens
        self.seed = seed
        self.stream = stream
        self.response_format = response_format
        self.logprobs = logprobs
        self.top_logprobs = top_logprobs
        self.reasoning_effort = reasoning_effort
        self.is_o1_model = "o1" in model.lower()
        self.is_gpt4_model = "gpt-4" in model.lower()
        self.is_o1_model = "o1" in self.model.lower()
        self.is_gpt4_model = "gpt-4" in self.model.lower()

        return self

    def _get_client_params(self) -> dict[str, Any]:
        """Get OpenAI client parameters."""

        if self.api_key is None:
            self.api_key = os.getenv("OPENAI_API_KEY")
        if self.api_key is None:
            raise ValueError("OPENAI_API_KEY is required")

        base_params = {
            "api_key": self.api_key,
            "organization": self.organization,
@@ -268,7 +254,6 @@ class OpenAICompletion(BaseLLM):
        self, tools: list[dict[str, BaseTool]]
    ) -> list[dict[str, Any]]:
        """Convert CrewAI tool format to OpenAI function calling format."""
        from crewai.llms.providers.utils.common import safe_tool_conversion

        openai_tools = []

@@ -296,14 +281,14 @@ class OpenAICompletion(BaseLLM):
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Handle non-streaming chat completion."""
        try:
            if response_model:
                parsed_response = self.client.beta.chat.completions.parse(
                parsed_response = self._client.beta.chat.completions.parse(
                    **params,
                    response_format=response_model,
                )
@@ -327,7 +312,7 @@ class OpenAICompletion(BaseLLM):
                )
                return structured_json
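                # Illustrative sketch (not part of this diff): calling the
                # structured path above; the schema is hypothetical.
                # from pydantic import BaseModel
                # class TaskResult(BaseModel):
                #     summary: str
                # llm = OpenAICompletion(model="gpt-4o")
                # llm.call("Summarize the report", response_model=TaskResult)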

            response: ChatCompletion = self.client.chat.completions.create(**params)
            response: ChatCompletion = self._client.chat.completions.create(**params)

            usage = self._extract_openai_token_usage(response)

@@ -419,8 +404,8 @@ class OpenAICompletion(BaseLLM):
        self,
        params: dict[str, Any],
        available_functions: dict[str, Any] | None = None,
        from_task: Any | None = None,
        from_agent: Any | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str:
        """Handle streaming chat completion."""
@@ -429,7 +414,7 @@ class OpenAICompletion(BaseLLM):

        if response_model:
            completion_stream: Iterator[ChatCompletionChunk] = (
                self.client.chat.completions.create(**params)
                self._client.chat.completions.create(**params)
            )

            accumulated_content = ""
@@ -472,7 +457,7 @@ class OpenAICompletion(BaseLLM):
            )
            return accumulated_content

        stream: Iterator[ChatCompletionChunk] = self.client.chat.completions.create(
        stream: Iterator[ChatCompletionChunk] = self._client.chat.completions.create(
            **params
        )

@@ -560,7 +545,6 @@ class OpenAICompletion(BaseLLM):

    def get_context_window_size(self) -> int:
        """Get the context window size for the model."""
        from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM_CONTEXT_WINDOW_SIZES

        min_context = 1024
        max_context = 2097152
@@ -1 +1,38 @@
"""LLM implementations for crewAI."""
"""LLM implementations for crewAI.

.. deprecated:: 1.4.0
    The `crewai.llms` package is deprecated. Use `crewai.llm` instead.

This package was reorganized from `crewai.llms.*` to `crewai.llm.*`.
All submodules are redirected to their new locations in `crewai.llm.*`.

Migration guide:
    Old: from crewai.llms.base_llm import BaseLLM
    New: from crewai.llm.base_llm import BaseLLM

    Old: from crewai.llms.hooks.base import BaseInterceptor
    New: from crewai.llm.hooks.base import BaseInterceptor

    Old: from crewai.llms.constants import OPENAI_MODELS
    New: from crewai.llm.constants import OPENAI_MODELS

Or use top-level imports:
    from crewai import LLM, BaseLLM
"""

import warnings

from crewai.llm import LLM
from crewai.llm.base_llm import BaseLLM


# Issue deprecation warning when this module is imported
warnings.warn(
    "The 'crewai.llms' package is deprecated and will be removed in a future version. "
    "Please use 'crewai.llm' (singular) instead. "
    "All submodules have been reorganized from 'crewai.llms.*' to 'crewai.llm.*'.",
    DeprecationWarning,
    stacklevel=2,
)

__all__ = ["LLM", "BaseLLM"]

@@ -1,550 +1,15 @@
"""Base LLM abstract class for CrewAI.
"""Deprecated: Use crewai.llm.base_llm instead.

This module provides the abstract base class for all LLM implementations
in CrewAI, including common functionality for native SDK implementations.
.. deprecated:: 1.4.0
"""

from __future__ import annotations
import warnings

from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
import re
from typing import TYPE_CHECKING, Any, Final

from pydantic import BaseModel

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import (
    LLMCallCompletedEvent,
    LLMCallFailedEvent,
    LLMCallStartedEvent,
    LLMCallType,
    LLMStreamChunkEvent,
warnings.warn(
    "crewai.llms.base_llm is deprecated. Use crewai.llm.base_llm instead.",
    DeprecationWarning,
    stacklevel=2,
)
from crewai.events.types.tool_usage_events import (
    ToolUsageErrorEvent,
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)
from crewai.types.usage_metrics import UsageMetrics


if TYPE_CHECKING:
    from crewai.agent.core import Agent
    from crewai.task import Task
    from crewai.tools.base_tool import BaseTool
    from crewai.utilities.types import LLMMessage


DEFAULT_CONTEXT_WINDOW_SIZE: Final[int] = 4096
DEFAULT_SUPPORTS_STOP_WORDS: Final[bool] = True
_JSON_EXTRACTION_PATTERN: Final[re.Pattern[str]] = re.compile(r"\{.*}", re.DOTALL)


class BaseLLM(ABC):
    """Abstract base class for LLM implementations.

    This class defines the interface that all LLM implementations must follow.
    Users can extend this class to create custom LLM implementations that don't
    rely on litellm's authentication mechanism.

    Custom LLM implementations should handle error cases gracefully, including
    timeouts, authentication failures, and malformed responses. They should also
    implement proper validation for input parameters and provide clear error
    messages when things go wrong.

    Attributes:
        model: The model identifier/name.
        temperature: Optional temperature setting for response generation.
        stop: A list of stop sequences that the LLM should use to stop generation.
        additional_params: Additional provider-specific parameters.
    """

    is_litellm: bool = False

    def __init__(
        self,
        model: str,
        temperature: float | None = None,
        api_key: str | None = None,
        base_url: str | None = None,
        provider: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the BaseLLM with default attributes.

        Args:
            model: The model identifier/name.
            temperature: Optional temperature setting for response generation.
            stop: Optional list of stop sequences for generation.
            **kwargs: Additional provider-specific parameters.
        """
        if not model:
            raise ValueError("Model name is required and cannot be empty")

        self.model = model
        self.temperature = temperature
        self.api_key = api_key
        self.base_url = base_url
        # Store additional parameters for provider-specific use
        self.additional_params = kwargs
        self._provider = provider or "openai"

        stop = kwargs.pop("stop", None)
        if stop is None:
            self.stop: list[str] = []
        elif isinstance(stop, str):
            self.stop = [stop]
        elif isinstance(stop, list):
            self.stop = stop
        else:
            self.stop = []

        self._token_usage = {
            "total_tokens": 0,
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "successful_requests": 0,
            "cached_prompt_tokens": 0,
        }

    @property
    def provider(self) -> str:
        """Get the provider of the LLM."""
        return self._provider

    @provider.setter
    def provider(self, value: str) -> None:
        """Set the provider of the LLM."""
        self._provider = value

    @abstractmethod
    def call(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        response_model: type[BaseModel] | None = None,
    ) -> str | Any:
        """Call the LLM with the given messages.

        Args:
            messages: Input messages for the LLM.
                Can be a string or list of message dictionaries.
                If string, it will be converted to a single user message.
                If list, each dict must have 'role' and 'content' keys.
            tools: Optional list of tool schemas for function calling.
                Each tool should define its name, description, and parameters.
            callbacks: Optional list of callback functions to be executed
                during and after the LLM call.
            available_functions: Optional dict mapping function names to callables
                that can be invoked by the LLM.
            from_task: Optional task caller to be used for the LLM call.
            from_agent: Optional agent caller to be used for the LLM call.
            response_model: Optional response model to be used for the LLM call.

        Returns:
            Either a text response from the LLM (str) or
            the result of a tool function call (Any).

        Raises:
            ValueError: If the messages format is invalid.
            TimeoutError: If the LLM request times out.
            RuntimeError: If the LLM request fails for other reasons.
        """

    def _convert_tools_for_interference(
        self, tools: list[dict[str, BaseTool]]
    ) -> list[dict[str, BaseTool]]:
        """Convert tools to a format that can be used for interference.

        Args:
            tools: List of tools to convert.

        Returns:
            List of converted tools (default implementation returns as-is)
        """
        return tools

    def supports_stop_words(self) -> bool:
        """Check if the LLM supports stop words.

        Returns:
            True if the LLM supports stop words, False otherwise.
        """
        return DEFAULT_SUPPORTS_STOP_WORDS

    def _supports_stop_words_implementation(self) -> bool:
        """Check if stop words are configured for this LLM instance.

        Native providers can override supports_stop_words() to return this value
        to ensure consistent behavior based on whether stop words are actually configured.

        Returns:
            True if stop words are configured and can be applied
        """
        return bool(self.stop)

    def _apply_stop_words(self, content: str) -> str:
        """Apply stop words to truncate response content.

        This method provides consistent stop word behavior across all native SDK providers.
        Native providers should call this method to post-process their responses.

        Args:
            content: The raw response content from the LLM

        Returns:
            Content truncated at the first occurrence of any stop word

        Example:
            >>> llm = MyNativeLLM(stop=["Observation:", "Final Answer:"])
            >>> response = (
            ...     "I need to search.\\n\\nAction: search\\nObservation: Found results"
            ... )
            >>> llm._apply_stop_words(response)
            "I need to search.\\n\\nAction: search"
        """
        if not self.stop or not content:
            return content

        # Find the earliest occurrence of any stop word
        earliest_stop_pos = len(content)
        found_stop_word = None

        for stop_word in self.stop:
            stop_pos = content.find(stop_word)
            if stop_pos != -1 and stop_pos < earliest_stop_pos:
                earliest_stop_pos = stop_pos
                found_stop_word = stop_word

        # Truncate at the stop word if found
        if found_stop_word is not None:
            truncated = content[:earliest_stop_pos].strip()
            logging.debug(
                f"Applied stop word '{found_stop_word}' at position {earliest_stop_pos}"
            )
            return truncated

        return content

    def get_context_window_size(self) -> int:
        """Get the context window size for the LLM.

        Returns:
            The number of tokens/characters the model can handle.
        """
        # Default implementation - subclasses should override with model-specific values
        return DEFAULT_CONTEXT_WINDOW_SIZE

    # Common helper methods for native SDK implementations

    def _emit_call_started_event(
        self,
        messages: str | list[LLMMessage],
        tools: list[dict[str, BaseTool]] | None = None,
        callbacks: list[Any] | None = None,
        available_functions: dict[str, Any] | None = None,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> None:
        """Emit LLM call started event."""
        if not hasattr(crewai_event_bus, "emit"):
            raise ValueError("crewai_event_bus does not have an emit method") from None

        crewai_event_bus.emit(
            self,
            event=LLMCallStartedEvent(
                messages=messages,
                tools=tools,
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
                from_agent=from_agent,
                model=self.model,
            ),
        )

    def _emit_call_completed_event(
        self,
        response: Any,
        call_type: LLMCallType,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        messages: str | list[dict[str, Any]] | None = None,
    ) -> None:
        """Emit LLM call completed event."""
        crewai_event_bus.emit(
            self,
            event=LLMCallCompletedEvent(
                messages=messages,
                response=response,
                call_type=call_type,
                from_task=from_task,
                from_agent=from_agent,
                model=self.model,
            ),
        )

    def _emit_call_failed_event(
        self,
        error: str,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> None:
        """Emit LLM call failed event."""
        if not hasattr(crewai_event_bus, "emit"):
            raise ValueError("crewai_event_bus does not have an emit method") from None

        crewai_event_bus.emit(
            self,
            event=LLMCallFailedEvent(
                error=error,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

    def _emit_stream_chunk_event(
        self,
        chunk: str,
        from_task: Task | None = None,
        from_agent: Agent | None = None,
        tool_call: dict[str, Any] | None = None,
    ) -> None:
        """Emit stream chunk event."""
        if not hasattr(crewai_event_bus, "emit"):
            raise ValueError("crewai_event_bus does not have an emit method") from None

        crewai_event_bus.emit(
            self,
            event=LLMStreamChunkEvent(
                chunk=chunk,
                tool_call=tool_call,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

    def _handle_tool_execution(
        self,
        function_name: str,
        function_args: dict[str, Any],
        available_functions: dict[str, Any],
        from_task: Task | None = None,
        from_agent: Agent | None = None,
    ) -> str | None:
        """Handle tool execution with proper event emission.

        Args:
            function_name: Name of the function to execute
            function_args: Arguments to pass to the function
            available_functions: Dict of available functions
            from_task: Optional task object
            from_agent: Optional agent object

        Returns:
            Result of function execution or None if function not found
        """
        if function_name not in available_functions:
            logging.warning(
                f"Function '{function_name}' not found in available functions"
            )
            return None

        try:
            # Emit tool usage started event
            started_at = datetime.now()

            crewai_event_bus.emit(
                self,
                event=ToolUsageStartedEvent(
                    tool_name=function_name,
                    tool_args=function_args,
                    from_agent=from_agent,
                    from_task=from_task,
                ),
            )

            # Execute the function
            fn = available_functions[function_name]
            result = fn(**function_args)

            # Emit tool usage finished event
            crewai_event_bus.emit(
                self,
                event=ToolUsageFinishedEvent(
                    output=result,
                    tool_name=function_name,
                    tool_args=function_args,
                    started_at=started_at,
                    finished_at=datetime.now(),
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

            # Emit LLM call completed event for tool call
            self._emit_call_completed_event(
                response=result,
                call_type=LLMCallType.TOOL_CALL,
                from_task=from_task,
                from_agent=from_agent,
            )

            return str(result)

        except Exception as e:
            error_msg = f"Error executing function '{function_name}': {e!s}"
            logging.error(error_msg)

            # Emit tool usage error event
            if not hasattr(crewai_event_bus, "emit"):
                raise ValueError(
                    "crewai_event_bus does not have an emit method"
                ) from None

            crewai_event_bus.emit(
                self,
                event=ToolUsageErrorEvent(
                    tool_name=function_name,
                    tool_args=function_args,
                    error=error_msg,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

            # Emit LLM call failed event
            self._emit_call_failed_event(
                error=error_msg,
                from_task=from_task,
                from_agent=from_agent,
            )

            return None
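        # Illustrative sketch (not part of this diff): driving
        # _handle_tool_execution above with a trivial function table.
        # def add(a: int, b: int) -> int:
        #     return a + b
        # llm._handle_tool_execution(
        #     function_name="add",
        #     function_args={"a": 2, "b": 3},
        #     available_functions={"add": add},
        # )  # returns "5"; an unknown name logs a warning and returns None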

    def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
        """Convert messages to standard format.

        Args:
            messages: Input messages (string or list of message dicts)

        Returns:
            List of message dictionaries with 'role' and 'content' keys

        Raises:
            ValueError: If message format is invalid
        """
        if isinstance(messages, str):
            return [{"role": "user", "content": messages}]

        # Validate message format
        for i, msg in enumerate(messages):
            if not isinstance(msg, dict):
                raise ValueError(f"Message at index {i} must be a dictionary")
            if "role" not in msg or "content" not in msg:
                raise ValueError(
                    f"Message at index {i} must have 'role' and 'content' keys"
                )

        return messages
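        # Illustrative sketch (not part of this diff): _format_messages behavior.
        # llm._format_messages("hi")  # -> [{"role": "user", "content": "hi"}]
        # llm._format_messages([{"role": "user"}])  # -> ValueError (missing 'content')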

    @staticmethod
    def _validate_structured_output(
        response: str,
        response_format: type[BaseModel] | None,
    ) -> str | BaseModel:
        """Validate and parse structured output.

        Args:
            response: Raw response string
            response_format: Optional Pydantic model for structured output

        Returns:
            Parsed response (BaseModel instance if response_format provided, otherwise string)

        Raises:
            ValueError: If structured output validation fails
        """
        if response_format is None:
            return response

        try:
            # Try to parse as JSON first
            if response.strip().startswith("{") or response.strip().startswith("["):
                data = json.loads(response)
                return response_format.model_validate(data)

            json_match = _JSON_EXTRACTION_PATTERN.search(response)
            if json_match:
                data = json.loads(json_match.group())
                return response_format.model_validate(data)

            raise ValueError("No JSON found in response")

        except (json.JSONDecodeError, ValueError) as e:
            logging.warning(f"Failed to parse structured output: {e}")
            raise ValueError(
                f"Failed to parse response into {response_format.__name__}: {e}"
            ) from e
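        # Illustrative sketch (not part of this diff): the regex fallback above
        # pulls the first {...} block out of noisy text; the schema is hypothetical.
        # from pydantic import BaseModel
        # class Point(BaseModel):
        #     x: int
        #     y: int
        # raw = 'Here you go: {"x": 1, "y": 2} -- done.'
        # BaseLLM._validate_structured_output(raw, Point)  # -> Point(x=1, y=2)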

    @staticmethod
    def _extract_provider(model: str) -> str:
        """Extract provider from model string.

        Args:
            model: Model string (e.g., 'openai/gpt-4' or 'gpt-4')

        Returns:
            Provider name (e.g., 'openai')
        """
        if "/" in model:
            return model.partition("/")[0]
        return "openai"  # Default provider

    def _track_token_usage_internal(self, usage_data: dict[str, Any]) -> None:
        """Track token usage internally in the LLM instance.

        Args:
            usage_data: Token usage data from the API response
        """
        # Extract tokens in a provider-agnostic way
        prompt_tokens = (
            usage_data.get("prompt_tokens")
            or usage_data.get("prompt_token_count")
            or usage_data.get("input_tokens")
            or 0
        )

        completion_tokens = (
            usage_data.get("completion_tokens")
            or usage_data.get("candidates_token_count")
            or usage_data.get("output_tokens")
            or 0
        )

        cached_tokens = (
            usage_data.get("cached_tokens")
            or usage_data.get("cached_prompt_tokens")
            or 0
        )

        self._token_usage["prompt_tokens"] += prompt_tokens
        self._token_usage["completion_tokens"] += completion_tokens
        self._token_usage["total_tokens"] += prompt_tokens + completion_tokens
        self._token_usage["successful_requests"] += 1
        self._token_usage["cached_prompt_tokens"] += cached_tokens

    def get_token_usage_summary(self) -> UsageMetrics:
        """Get summary of token usage for this LLM instance.

        Returns:
            UsageMetrics with token usage totals
        """
        return UsageMetrics(**self._token_usage)
from crewai.llm.base_llm import *  # noqa: E402, F403

@@ -1,558 +1,15 @@
from typing import Literal, TypeAlias
"""Deprecated: Use crewai.llm.constants instead.

.. deprecated:: 1.4.0
"""

import warnings


OpenAIModels: TypeAlias = Literal[
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-instruct",
    "gpt-3.5-turbo-instruct-0914",
    "gpt-4",
    "gpt-4-0125-preview",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-1106-preview",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "gpt-4-turbo",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-turbo-preview",
    "gpt-4-vision-preview",
    "gpt-4.1",
    "gpt-4.1-2025-04-14",
    "gpt-4.1-mini",
    "gpt-4.1-mini-2025-04-14",
    "gpt-4.1-nano",
    "gpt-4.1-nano-2025-04-14",
    "gpt-4o",
    "gpt-4o-2024-05-13",
    "gpt-4o-2024-08-06",
    "gpt-4o-2024-11-20",
    "gpt-4o-audio-preview",
    "gpt-4o-audio-preview-2024-10-01",
    "gpt-4o-audio-preview-2024-12-17",
    "gpt-4o-audio-preview-2025-06-03",
    "gpt-4o-mini",
    "gpt-4o-mini-2024-07-18",
    "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-audio-preview-2024-12-17",
    "gpt-4o-mini-realtime-preview",
    "gpt-4o-mini-realtime-preview-2024-12-17",
    "gpt-4o-mini-search-preview",
    "gpt-4o-mini-search-preview-2025-03-11",
    "gpt-4o-mini-transcribe",
    "gpt-4o-mini-tts",
    "gpt-4o-realtime-preview",
    "gpt-4o-realtime-preview-2024-10-01",
    "gpt-4o-realtime-preview-2024-12-17",
    "gpt-4o-realtime-preview-2025-06-03",
    "gpt-4o-search-preview",
    "gpt-4o-search-preview-2025-03-11",
    "gpt-4o-transcribe",
    "gpt-4o-transcribe-diarize",
    "gpt-5",
    "gpt-5-2025-08-07",
    "gpt-5-chat",
    "gpt-5-chat-latest",
    "gpt-5-codex",
    "gpt-5-mini",
    "gpt-5-mini-2025-08-07",
    "gpt-5-nano",
    "gpt-5-nano-2025-08-07",
    "gpt-5-pro",
    "gpt-5-pro-2025-10-06",
    "gpt-5-search-api",
    "gpt-5-search-api-2025-10-14",
    "gpt-audio",
    "gpt-audio-2025-08-28",
    "gpt-audio-mini",
    "gpt-audio-mini-2025-10-06",
    "gpt-image-1",
    "gpt-image-1-mini",
    "gpt-realtime",
    "gpt-realtime-2025-08-28",
    "gpt-realtime-mini",
    "gpt-realtime-mini-2025-10-06",
    "o1",
    "o1-preview",
    "o1-2024-12-17",
    "o1-mini",
    "o1-mini-2024-09-12",
    "o1-pro",
    "o1-pro-2025-03-19",
    "o3-mini",
    "o3",
    "o4-mini",
    "whisper-1",
]
OPENAI_MODELS: list[OpenAIModels] = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-3.5-turbo-instruct",
    "gpt-3.5-turbo-instruct-0914",
    "gpt-4",
    "gpt-4-0125-preview",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-1106-preview",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "gpt-4-turbo",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-turbo-preview",
    "gpt-4-vision-preview",
    "gpt-4.1",
    "gpt-4.1-2025-04-14",
    "gpt-4.1-mini",
    "gpt-4.1-mini-2025-04-14",
    "gpt-4.1-nano",
    "gpt-4.1-nano-2025-04-14",
    "gpt-4o",
    "gpt-4o-2024-05-13",
    "gpt-4o-2024-08-06",
    "gpt-4o-2024-11-20",
    "gpt-4o-audio-preview",
    "gpt-4o-audio-preview-2024-10-01",
    "gpt-4o-audio-preview-2024-12-17",
    "gpt-4o-audio-preview-2025-06-03",
    "gpt-4o-mini",
    "gpt-4o-mini-2024-07-18",
    "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-audio-preview-2024-12-17",
    "gpt-4o-mini-realtime-preview",
    "gpt-4o-mini-realtime-preview-2024-12-17",
    "gpt-4o-mini-search-preview",
    "gpt-4o-mini-search-preview-2025-03-11",
    "gpt-4o-mini-transcribe",
    "gpt-4o-mini-tts",
    "gpt-4o-realtime-preview",
    "gpt-4o-realtime-preview-2024-10-01",
    "gpt-4o-realtime-preview-2024-12-17",
    "gpt-4o-realtime-preview-2025-06-03",
    "gpt-4o-search-preview",
    "gpt-4o-search-preview-2025-03-11",
    "gpt-4o-transcribe",
    "gpt-4o-transcribe-diarize",
    "gpt-5",
    "gpt-5-2025-08-07",
    "gpt-5-chat",
    "gpt-5-chat-latest",
    "gpt-5-codex",
    "gpt-5-mini",
    "gpt-5-mini-2025-08-07",
    "gpt-5-nano",
    "gpt-5-nano-2025-08-07",
    "gpt-5-pro",
    "gpt-5-pro-2025-10-06",
    "gpt-5-search-api",
    "gpt-5-search-api-2025-10-14",
    "gpt-audio",
    "gpt-audio-2025-08-28",
    "gpt-audio-mini",
    "gpt-audio-mini-2025-10-06",
    "gpt-image-1",
    "gpt-image-1-mini",
    "gpt-realtime",
    "gpt-realtime-2025-08-28",
    "gpt-realtime-mini",
    "gpt-realtime-mini-2025-10-06",
    "o1",
    "o1-preview",
    "o1-2024-12-17",
    "o1-mini",
    "o1-mini-2024-09-12",
    "o1-pro",
    "o1-pro-2025-03-19",
    "o3-mini",
    "o3",
    "o4-mini",
    "whisper-1",
]
warnings.warn(
    "crewai.llms.constants is deprecated. Use crewai.llm.constants instead.",
    DeprecationWarning,
    stacklevel=2,
)


AnthropicModels: TypeAlias = Literal[
    "claude-3-7-sonnet-latest",
    "claude-3-7-sonnet-20250219",
    "claude-3-5-haiku-latest",
    "claude-3-5-haiku-20241022",
    "claude-haiku-4-5",
    "claude-haiku-4-5-20251001",
    "claude-sonnet-4-20250514",
    "claude-sonnet-4-0",
    "claude-4-sonnet-20250514",
    "claude-sonnet-4-5",
    "claude-sonnet-4-5-20250929",
    "claude-3-5-sonnet-latest",
    "claude-3-5-sonnet-20241022",
    "claude-3-5-sonnet-20240620",
    "claude-opus-4-0",
    "claude-opus-4-20250514",
    "claude-4-opus-20250514",
    "claude-opus-4-1",
    "claude-opus-4-1-20250805",
    "claude-3-opus-latest",
    "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-latest",
    "claude-3-haiku-20240307",
]
ANTHROPIC_MODELS: list[AnthropicModels] = [
    "claude-3-7-sonnet-latest",
    "claude-3-7-sonnet-20250219",
    "claude-3-5-haiku-latest",
    "claude-3-5-haiku-20241022",
    "claude-haiku-4-5",
    "claude-haiku-4-5-20251001",
    "claude-sonnet-4-20250514",
    "claude-sonnet-4-0",
    "claude-4-sonnet-20250514",
    "claude-sonnet-4-5",
    "claude-sonnet-4-5-20250929",
    "claude-3-5-sonnet-latest",
    "claude-3-5-sonnet-20241022",
    "claude-3-5-sonnet-20240620",
    "claude-opus-4-0",
    "claude-opus-4-20250514",
    "claude-4-opus-20250514",
    "claude-opus-4-1",
    "claude-opus-4-1-20250805",
    "claude-3-opus-latest",
    "claude-3-opus-20240229",
    "claude-3-sonnet-20240229",
    "claude-3-haiku-latest",
    "claude-3-haiku-20240307",
]

GeminiModels: TypeAlias = Literal[
    "gemini-2.5-pro",
    "gemini-2.5-pro-preview-03-25",
    "gemini-2.5-pro-preview-05-06",
    "gemini-2.5-pro-preview-06-05",
    "gemini-2.5-flash",
    "gemini-2.5-flash-preview-05-20",
    "gemini-2.5-flash-preview-04-17",
    "gemini-2.5-flash-image",
    "gemini-2.5-flash-image-preview",
    "gemini-2.5-flash-lite",
    "gemini-2.5-flash-lite-preview-06-17",
    "gemini-2.5-flash-preview-09-2025",
    "gemini-2.5-flash-lite-preview-09-2025",
    "gemini-2.5-flash-preview-tts",
    "gemini-2.5-pro-preview-tts",
    "gemini-2.5-computer-use-preview-10-2025",
    "gemini-2.0-flash",
    "gemini-2.0-flash-001",
    "gemini-2.0-flash-exp",
    "gemini-2.0-flash-exp-image-generation",
    "gemini-2.0-flash-lite",
    "gemini-2.0-flash-lite-001",
    "gemini-2.0-flash-lite-preview",
    "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-preview-image-generation",
    "gemini-2.0-flash-thinking-exp",
    "gemini-2.0-flash-thinking-exp-01-21",
    "gemini-2.0-flash-thinking-exp-1219",
    "gemini-2.0-pro-exp",
    "gemini-2.0-pro-exp-02-05",
    "gemini-exp-1206",
    "gemini-1.5-pro",
    "gemini-1.5-flash",
    "gemini-1.5-flash-8b",
    "gemini-flash-latest",
    "gemini-flash-lite-latest",
    "gemini-pro-latest",
    "gemini-2.0-flash-live-001",
    "gemini-live-2.5-flash-preview",
    "gemini-2.5-flash-live-preview",
    "gemini-robotics-er-1.5-preview",
    "gemini-gemma-2-27b-it",
    "gemini-gemma-2-9b-it",
    "gemma-3-1b-it",
    "gemma-3-4b-it",
    "gemma-3-12b-it",
    "gemma-3-27b-it",
    "gemma-3n-e2b-it",
    "gemma-3n-e4b-it",
    "learnlm-2.0-flash-experimental",
]
GEMINI_MODELS: list[GeminiModels] = [
    "gemini-2.5-pro",
    "gemini-2.5-pro-preview-03-25",
    "gemini-2.5-pro-preview-05-06",
    "gemini-2.5-pro-preview-06-05",
    "gemini-2.5-flash",
    "gemini-2.5-flash-preview-05-20",
    "gemini-2.5-flash-preview-04-17",
    "gemini-2.5-flash-image",
    "gemini-2.5-flash-image-preview",
    "gemini-2.5-flash-lite",
    "gemini-2.5-flash-lite-preview-06-17",
    "gemini-2.5-flash-preview-09-2025",
    "gemini-2.5-flash-lite-preview-09-2025",
    "gemini-2.5-flash-preview-tts",
    "gemini-2.5-pro-preview-tts",
    "gemini-2.5-computer-use-preview-10-2025",
    "gemini-2.0-flash",
    "gemini-2.0-flash-001",
    "gemini-2.0-flash-exp",
    "gemini-2.0-flash-exp-image-generation",
    "gemini-2.0-flash-lite",
    "gemini-2.0-flash-lite-001",
    "gemini-2.0-flash-lite-preview",
    "gemini-2.0-flash-lite-preview-02-05",
    "gemini-2.0-flash-preview-image-generation",
    "gemini-2.0-flash-thinking-exp",
    "gemini-2.0-flash-thinking-exp-01-21",
    "gemini-2.0-flash-thinking-exp-1219",
    "gemini-2.0-pro-exp",
    "gemini-2.0-pro-exp-02-05",
    "gemini-exp-1206",
    "gemini-1.5-pro",
    "gemini-1.5-flash",
    "gemini-1.5-flash-8b",
    "gemini-flash-latest",
    "gemini-flash-lite-latest",
    "gemini-pro-latest",
    "gemini-2.0-flash-live-001",
    "gemini-live-2.5-flash-preview",
    "gemini-2.5-flash-live-preview",
    "gemini-robotics-er-1.5-preview",
    "gemini-gemma-2-27b-it",
    "gemini-gemma-2-9b-it",
    "gemma-3-1b-it",
    "gemma-3-4b-it",
    "gemma-3-12b-it",
    "gemma-3-27b-it",
    "gemma-3n-e2b-it",
    "gemma-3n-e4b-it",
    "learnlm-2.0-flash-experimental",
]


AzureModels: TypeAlias = Literal[
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-35-turbo",
    "gpt-35-turbo-0125",
    "gpt-35-turbo-1106",
    "gpt-35-turbo-16k-0613",
    "gpt-35-turbo-instruct-0914",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-1106-preview",
    "gpt-4-0125-preview",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "gpt-4-turbo",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-vision",
    "gpt-4o",
    "gpt-4o-2024-05-13",
    "gpt-4o-2024-08-06",
    "gpt-4o-2024-11-20",
    "gpt-4o-mini",
    "gpt-5",
    "o1",
    "o1-mini",
    "o1-preview",
    "o3-mini",
    "o3",
    "o4-mini",
]
AZURE_MODELS: list[AzureModels] = [
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-16k-0613",
    "gpt-35-turbo",
    "gpt-35-turbo-0125",
    "gpt-35-turbo-1106",
    "gpt-35-turbo-16k-0613",
    "gpt-35-turbo-instruct-0914",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-1106-preview",
    "gpt-4-0125-preview",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "gpt-4-turbo",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-vision",
    "gpt-4o",
    "gpt-4o-2024-05-13",
    "gpt-4o-2024-08-06",
    "gpt-4o-2024-11-20",
    "gpt-4o-mini",
    "gpt-5",
    "o1",
    "o1-mini",
    "o1-preview",
    "o3-mini",
    "o3",
    "o4-mini",
]


BedrockModels: TypeAlias = Literal[
    "ai21.jamba-1-5-large-v1:0",
    "ai21.jamba-1-5-mini-v1:0",
    "amazon.nova-lite-v1:0",
    "amazon.nova-lite-v1:0:24k",
    "amazon.nova-lite-v1:0:300k",
    "amazon.nova-micro-v1:0",
    "amazon.nova-micro-v1:0:128k",
    "amazon.nova-micro-v1:0:24k",
    "amazon.nova-premier-v1:0",
    "amazon.nova-premier-v1:0:1000k",
    "amazon.nova-premier-v1:0:20k",
    "amazon.nova-premier-v1:0:8k",
    "amazon.nova-premier-v1:0:mm",
    "amazon.nova-pro-v1:0",
    "amazon.nova-pro-v1:0:24k",
    "amazon.nova-pro-v1:0:300k",
    "amazon.titan-text-express-v1",
    "amazon.titan-text-express-v1:0:8k",
    "amazon.titan-text-lite-v1",
    "amazon.titan-text-lite-v1:0:4k",
    "amazon.titan-tg1-large",
    "anthropic.claude-3-5-haiku-20241022-v1:0",
    "anthropic.claude-3-5-sonnet-20240620-v1:0",
    "anthropic.claude-3-5-sonnet-20241022-v2:0",
    "anthropic.claude-3-7-sonnet-20250219-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0:200k",
    "anthropic.claude-3-haiku-20240307-v1:0:48k",
    "anthropic.claude-3-opus-20240229-v1:0",
    "anthropic.claude-3-opus-20240229-v1:0:12k",
    "anthropic.claude-3-opus-20240229-v1:0:200k",
    "anthropic.claude-3-opus-20240229-v1:0:28k",
    "anthropic.claude-3-sonnet-20240229-v1:0",
    "anthropic.claude-3-sonnet-20240229-v1:0:200k",
    "anthropic.claude-3-sonnet-20240229-v1:0:28k",
    "anthropic.claude-haiku-4-5-20251001-v1:0",
    "anthropic.claude-instant-v1:2:100k",
    "anthropic.claude-opus-4-1-20250805-v1:0",
    "anthropic.claude-opus-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-5-20250929-v1:0",
    "anthropic.claude-v2:0:100k",
    "anthropic.claude-v2:0:18k",
    "anthropic.claude-v2:1:18k",
    "anthropic.claude-v2:1:200k",
    "cohere.command-r-plus-v1:0",
    "cohere.command-r-v1:0",
    "cohere.rerank-v3-5:0",
    "deepseek.r1-v1:0",
    "meta.llama3-1-70b-instruct-v1:0",
    "meta.llama3-1-8b-instruct-v1:0",
    "meta.llama3-2-11b-instruct-v1:0",
    "meta.llama3-2-1b-instruct-v1:0",
    "meta.llama3-2-3b-instruct-v1:0",
    "meta.llama3-2-90b-instruct-v1:0",
    "meta.llama3-3-70b-instruct-v1:0",
    "meta.llama3-70b-instruct-v1:0",
    "meta.llama3-8b-instruct-v1:0",
    "meta.llama4-maverick-17b-instruct-v1:0",
    "meta.llama4-scout-17b-instruct-v1:0",
    "mistral.mistral-7b-instruct-v0:2",
    "mistral.mistral-large-2402-v1:0",
    "mistral.mistral-small-2402-v1:0",
    "mistral.mixtral-8x7b-instruct-v0:1",
    "mistral.pixtral-large-2502-v1:0",
    "openai.gpt-oss-120b-1:0",
    "openai.gpt-oss-20b-1:0",
    "qwen.qwen3-32b-v1:0",
    "qwen.qwen3-coder-30b-a3b-v1:0",
    "twelvelabs.pegasus-1-2-v1:0",
]
BEDROCK_MODELS: list[BedrockModels] = [
    "ai21.jamba-1-5-large-v1:0",
    "ai21.jamba-1-5-mini-v1:0",
    "amazon.nova-lite-v1:0",
    "amazon.nova-lite-v1:0:24k",
    "amazon.nova-lite-v1:0:300k",
    "amazon.nova-micro-v1:0",
    "amazon.nova-micro-v1:0:128k",
    "amazon.nova-micro-v1:0:24k",
    "amazon.nova-premier-v1:0",
    "amazon.nova-premier-v1:0:1000k",
    "amazon.nova-premier-v1:0:20k",
    "amazon.nova-premier-v1:0:8k",
    "amazon.nova-premier-v1:0:mm",
    "amazon.nova-pro-v1:0",
    "amazon.nova-pro-v1:0:24k",
    "amazon.nova-pro-v1:0:300k",
    "amazon.titan-text-express-v1",
    "amazon.titan-text-express-v1:0:8k",
    "amazon.titan-text-lite-v1",
    "amazon.titan-text-lite-v1:0:4k",
    "amazon.titan-tg1-large",
    "anthropic.claude-3-5-haiku-20241022-v1:0",
    "anthropic.claude-3-5-sonnet-20240620-v1:0",
    "anthropic.claude-3-5-sonnet-20241022-v2:0",
    "anthropic.claude-3-7-sonnet-20250219-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0",
    "anthropic.claude-3-haiku-20240307-v1:0:200k",
    "anthropic.claude-3-haiku-20240307-v1:0:48k",
    "anthropic.claude-3-opus-20240229-v1:0",
    "anthropic.claude-3-opus-20240229-v1:0:12k",
    "anthropic.claude-3-opus-20240229-v1:0:200k",
    "anthropic.claude-3-opus-20240229-v1:0:28k",
    "anthropic.claude-3-sonnet-20240229-v1:0",
    "anthropic.claude-3-sonnet-20240229-v1:0:200k",
    "anthropic.claude-3-sonnet-20240229-v1:0:28k",
    "anthropic.claude-haiku-4-5-20251001-v1:0",
    "anthropic.claude-instant-v1:2:100k",
    "anthropic.claude-opus-4-1-20250805-v1:0",
    "anthropic.claude-opus-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-20250514-v1:0",
    "anthropic.claude-sonnet-4-5-20250929-v1:0",
    "anthropic.claude-v2:0:100k",
    "anthropic.claude-v2:0:18k",
    "anthropic.claude-v2:1:18k",
    "anthropic.claude-v2:1:200k",
    "cohere.command-r-plus-v1:0",
    "cohere.command-r-v1:0",
    "cohere.rerank-v3-5:0",
    "deepseek.r1-v1:0",
    "meta.llama3-1-70b-instruct-v1:0",
    "meta.llama3-1-8b-instruct-v1:0",
    "meta.llama3-2-11b-instruct-v1:0",
    "meta.llama3-2-1b-instruct-v1:0",
    "meta.llama3-2-3b-instruct-v1:0",
    "meta.llama3-2-90b-instruct-v1:0",
    "meta.llama3-3-70b-instruct-v1:0",
    "meta.llama3-70b-instruct-v1:0",
    "meta.llama3-8b-instruct-v1:0",
    "meta.llama4-maverick-17b-instruct-v1:0",
    "meta.llama4-scout-17b-instruct-v1:0",
    "mistral.mistral-7b-instruct-v0:2",
    "mistral.mistral-large-2402-v1:0",
    "mistral.mistral-small-2402-v1:0",
    "mistral.mixtral-8x7b-instruct-v0:1",
    "mistral.pixtral-large-2502-v1:0",
    "openai.gpt-oss-120b-1:0",
    "openai.gpt-oss-20b-1:0",
    "qwen.qwen3-32b-v1:0",
    "qwen.qwen3-coder-30b-a3b-v1:0",
    "twelvelabs.pegasus-1-2-v1:0",
]
from crewai.llm.constants import *  # noqa: E402, F403

@@ -1,6 +1,15 @@
"""Interceptor contracts for crewai"""
"""Deprecated: Use crewai.llm.hooks instead.

from crewai.llms.hooks.base import BaseInterceptor
.. deprecated:: 1.4.0
"""

import warnings


__all__ = ["BaseInterceptor"]
warnings.warn(
    "crewai.llms.hooks is deprecated. Use crewai.llm.hooks instead.",
    DeprecationWarning,
    stacklevel=2,
)

from crewai.llm.hooks import *  # noqa: E402, F403

||||
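The shim keeps the old import path working while steering callers to the relocated package. A minimal migration sketch, assuming only what the hunk above shows (the old module warns once at import time and star-imports the new one); the same pattern repeats for every `crewai.llms.*` module in this change set:

```python
import warnings

# Old path: still works, but the shim emits a DeprecationWarning
# (only the first time the module is imported in a process).
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from crewai.llms.hooks import BaseInterceptor  # deprecated alias

assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# New path: what code should import going forward.
from crewai.llm.hooks import BaseInterceptor  # noqa: F811
```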
lib/crewai/src/crewai/llms/hooks/base.py
@@ -1,133 +1,15 @@
-"""Base classes for LLM transport interceptors.
-
-This module provides abstract base classes for intercepting and modifying
-outbound and inbound messages at the transport level.
-"""
-
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Generic, TypeVar
-
-from pydantic_core import core_schema
-
-
-if TYPE_CHECKING:
-    from pydantic import GetCoreSchemaHandler
-    from pydantic_core import CoreSchema
-
-
-T = TypeVar("T")
-U = TypeVar("U")
-
-
-class BaseInterceptor(ABC, Generic[T, U]):
-    """Abstract base class for intercepting transport-level messages.
-
-    Provides hooks to intercept and modify outbound and inbound messages
-    at the transport layer.
-
-    Type parameters:
-        T: Outbound message type (e.g., httpx.Request)
-        U: Inbound message type (e.g., httpx.Response)
-
-    Example:
-        >>> import httpx
-        >>> class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
-        ...     def on_outbound(self, message: httpx.Request) -> httpx.Request:
-        ...         message.headers["X-Custom-Header"] = "value"
-        ...         return message
-        ...
-        ...     def on_inbound(self, message: httpx.Response) -> httpx.Response:
-        ...         print(f"Status: {message.status_code}")
-        ...         return message
-    """
-
-    @abstractmethod
-    def on_outbound(self, message: T) -> T:
-        """Intercept outbound message before sending.
-
-        Args:
-            message: Outbound message object.
-
-        Returns:
-            Modified message object.
-        """
-        ...
-
-    @abstractmethod
-    def on_inbound(self, message: U) -> U:
-        """Intercept inbound message after receiving.
-
-        Args:
-            message: Inbound message object.
-
-        Returns:
-            Modified message object.
-        """
-        ...
-
-    async def aon_outbound(self, message: T) -> T:
-        """Async version of on_outbound.
-
-        Args:
-            message: Outbound message object.
-
-        Returns:
-            Modified message object.
-        """
-        raise NotImplementedError
-
-    async def aon_inbound(self, message: U) -> U:
-        """Async version of on_inbound.
-
-        Args:
-            message: Inbound message object.
-
-        Returns:
-            Modified message object.
-        """
-        raise NotImplementedError
-
-    @classmethod
-    def __get_pydantic_core_schema__(
-        cls, _source_type: Any, _handler: GetCoreSchemaHandler
-    ) -> CoreSchema:
-        """Generate Pydantic core schema for BaseInterceptor.
-
-        This allows the generic BaseInterceptor to be used in Pydantic models
-        without requiring arbitrary_types_allowed=True. The schema validates
-        that the value is an instance of BaseInterceptor.
-
-        Args:
-            _source_type: The source type being validated (unused).
-            _handler: Handler for generating schemas (unused).
-
-        Returns:
-            A Pydantic core schema that validates BaseInterceptor instances.
-        """
-        return core_schema.no_info_plain_validator_function(
-            _validate_interceptor,
-            serialization=core_schema.plain_serializer_function_ser_schema(
-                lambda x: x, return_schema=core_schema.any_schema()
-            ),
-        )
-
-
-def _validate_interceptor(value: Any) -> BaseInterceptor[T, U]:
-    """Validate that the value is a BaseInterceptor instance.
-
-    Args:
-        value: The value to validate.
-
-    Returns:
-        The validated BaseInterceptor instance.
-
-    Raises:
-        ValueError: If the value is not a BaseInterceptor instance.
-    """
-    if not isinstance(value, BaseInterceptor):
-        raise ValueError(
-            f"Expected BaseInterceptor instance, got {type(value).__name__}"
-        )
-    return value
+"""Deprecated: Use crewai.llm.hooks.base instead.
+
+.. deprecated:: 1.4.0
+"""
+
+import warnings
+
+
+warnings.warn(
+    "crewai.llms.hooks.base is deprecated. Use crewai.llm.hooks.base instead.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+from crewai.llm.hooks.base import *  # noqa: E402, F403
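Because `__get_pydantic_core_schema__` is defined on the base class, an interceptor can be declared as an ordinary field on a Pydantic model, exactly as the removed docstring claims. A minimal sketch assuming only the classes shown above (the `HeaderInterceptor` and `ClientConfig` names are hypothetical):

```python
import httpx
from pydantic import BaseModel

from crewai.llm.hooks.base import BaseInterceptor


class HeaderInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        message.headers["X-Custom-Header"] = "value"
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        return message


class ClientConfig(BaseModel):
    # No arbitrary_types_allowed needed: the custom core schema validates
    # isinstance(value, BaseInterceptor) when the model is constructed.
    interceptor: BaseInterceptor[httpx.Request, httpx.Response]


config = ClientConfig(interceptor=HeaderInterceptor())
```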
lib/crewai/src/crewai/llms/hooks/transport.py
@@ -1,123 +1,15 @@
-"""HTTP transport implementations for LLM request/response interception.
-
-This module provides internal transport classes that integrate with BaseInterceptor
-to enable request/response modification at the transport level.
-"""
-
-from __future__ import annotations
-
-from collections.abc import Iterable
-from typing import TYPE_CHECKING, TypedDict
-
-from httpx import (
-    AsyncHTTPTransport as _AsyncHTTPTransport,
-    HTTPTransport as _HTTPTransport,
-)
-from typing_extensions import NotRequired, Unpack
-
-
-if TYPE_CHECKING:
-    from ssl import SSLContext
-
-    from httpx import Limits, Request, Response
-    from httpx._types import CertTypes, ProxyTypes
-
-    from crewai.llms.hooks.base import BaseInterceptor
-
-
-class HTTPTransportKwargs(TypedDict, total=False):
-    """Typed dictionary for httpx.HTTPTransport initialization parameters.
-
-    These parameters configure the underlying HTTP transport behavior including
-    SSL verification, proxies, connection limits, and low-level socket options.
-    """
-
-    verify: bool | str | SSLContext
-    cert: NotRequired[CertTypes]
-    trust_env: bool
-    http1: bool
-    http2: bool
-    limits: Limits
-    proxy: NotRequired[ProxyTypes]
-    uds: NotRequired[str]
-    local_address: NotRequired[str]
-    retries: int
-    socket_options: NotRequired[
-        Iterable[
-            tuple[int, int, int]
-            | tuple[int, int, bytes | bytearray]
-            | tuple[int, int, None, int]
-        ]
-    ]
-
-
-class HTTPTransport(_HTTPTransport):
-    """HTTP transport that uses an interceptor for request/response modification.
-
-    This transport is used internally when a user provides a BaseInterceptor.
-    Users should not instantiate this class directly - instead, pass an interceptor
-    to the LLM client and this transport will be created automatically.
-    """
-
-    def __init__(
-        self,
-        interceptor: BaseInterceptor[Request, Response],
-        **kwargs: Unpack[HTTPTransportKwargs],
-    ) -> None:
-        """Initialize transport with interceptor.
-
-        Args:
-            interceptor: HTTP interceptor for modifying raw request/response objects.
-            **kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
-        """
-        super().__init__(**kwargs)
-        self.interceptor = interceptor
-
-    def handle_request(self, request: Request) -> Response:
-        """Handle request with interception.
-
-        Args:
-            request: The HTTP request to handle.
-
-        Returns:
-            The HTTP response.
-        """
-        request = self.interceptor.on_outbound(request)
-        response = super().handle_request(request)
-        return self.interceptor.on_inbound(response)
-
-
-class AsyncHTTPTransport(_AsyncHTTPTransport):
-    """Async HTTP transport that uses an interceptor for request/response modification.
-
-    This transport is used internally when a user provides a BaseInterceptor.
-    Users should not instantiate this class directly - instead, pass an interceptor
-    to the LLM client and this transport will be created automatically.
-    """
-
-    def __init__(
-        self,
-        interceptor: BaseInterceptor[Request, Response],
-        **kwargs: Unpack[HTTPTransportKwargs],
-    ) -> None:
-        """Initialize async transport with interceptor.
-
-        Args:
-            interceptor: HTTP interceptor for modifying raw request/response objects.
-            **kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
-        """
-        super().__init__(**kwargs)
-        self.interceptor = interceptor
-
-    async def handle_async_request(self, request: Request) -> Response:
-        """Handle async request with interception.
-
-        Args:
-            request: The HTTP request to handle.
-
-        Returns:
-            The HTTP response.
-        """
-        request = await self.interceptor.aon_outbound(request)
-        response = await super().handle_async_request(request)
-        return await self.interceptor.aon_inbound(response)
+"""Deprecated: Use crewai.llm.hooks.transport instead.
+
+.. deprecated:: 1.4.0
+"""
+
+import warnings
+
+
+warnings.warn(
+    "crewai.llms.hooks.transport is deprecated. Use crewai.llm.hooks.transport instead.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+from crewai.llm.hooks.transport import *  # noqa: E402, F403
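Since these transports subclass httpx's own, they can be exercised directly with a plain httpx client. The removed docstrings note that crewAI constructs the transport internally when you hand an interceptor to the LLM client, so direct construction below is purely to illustrate the mechanics; a hedged sketch, assuming the relocated module keeps the interface shown above (the `EchoInterceptor` name is hypothetical):

```python
import httpx

from crewai.llm.hooks.base import BaseInterceptor
from crewai.llm.hooks.transport import HTTPTransport


class EchoInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        # Runs just before the request leaves the transport.
        print(f"-> {message.method} {message.url}")
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        # Runs on the raw response before it reaches the caller.
        print(f"<- {message.status_code}")
        return message


# Wire the intercepting transport into an ordinary httpx client;
# `retries` is one of the HTTPTransportKwargs fields above.
transport = HTTPTransport(interceptor=EchoInterceptor(), retries=1)
client = httpx.Client(transport=transport)
response = client.get("https://example.com")
```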
lib/crewai/src/crewai/llms/internal/__init__.py (new file, +15)
@@ -0,0 +1,15 @@
+"""Deprecated: Use crewai.llm.internal instead.
+
+.. deprecated:: 1.4.0
+"""
+
+import warnings
+
+
+warnings.warn(
+    "crewai.llms.internal is deprecated. Use crewai.llm.internal instead.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+from crewai.llm.internal import *  # noqa: E402, F403

lib/crewai/src/crewai/llms/internal/constants.py (new file, +15)
@@ -0,0 +1,15 @@
+"""Deprecated: Use crewai.llm.internal.constants instead.
+
+.. deprecated:: 1.4.0
+"""
+
+import warnings
+
+
+warnings.warn(
+    "crewai.llms.internal.constants is deprecated. Use crewai.llm.internal.constants instead.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+from crewai.llm.internal.constants import *  # noqa: E402, F403

lib/crewai/src/crewai/llms/providers/__init__.py (new file, +15)
@@ -0,0 +1,15 @@
+"""Deprecated: Use crewai.llm.providers instead.
+
+.. deprecated:: 1.4.0
+"""
+
+import warnings
+
+
+warnings.warn(
+    "crewai.llms.providers is deprecated. Use crewai.llm.providers instead.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+from crewai.llm.providers import *  # noqa: E402, F403
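Every one of these shims emits the same style of `DeprecationWarning` at import time. A hedged sketch of muting them during a gradual migration, using only the standard library (the regex is derived from the warning strings in the hunks above; note the filter must be installed before the first import of any legacy module, since module-level warnings fire only once per process):

```python
import warnings

# Temporarily silence the legacy-path warnings while code is migrated.
warnings.filterwarnings(
    "ignore",
    message=r"crewai\.llms\..* is deprecated",
    category=DeprecationWarning,
)
```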
@@ -1 +0,0 @@
-"""Third-party LLM implementations for crewAI."""

@@ -8,7 +8,7 @@ Classes:

 from typing import Any

-from crewai.llm import LLM
+from crewai.llm.core import LLM
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.logger import Logger

@@ -4,7 +4,7 @@ from pydantic import BaseModel, Field

 from crewai.agent import Agent
 from crewai.lite_agent_output import LiteAgentOutput
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.tasks.task_output import TaskOutput

@@ -36,7 +36,7 @@ if TYPE_CHECKING:
     from crewai.agents.agent_builder.base_agent import BaseAgent
     from crewai.agents.tools_handler import ToolsHandler
     from crewai.lite_agent import LiteAgent
-    from crewai.llm import LLM
+    from crewai.llm.core import LLM
     from crewai.task import Task

@@ -16,7 +16,7 @@ from crewai.agents.parser import (
     parse,
 )
 from crewai.cli.config import Settings
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.tools import BaseTool as CrewAITool
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool

@@ -19,7 +19,7 @@ if TYPE_CHECKING:
     from crewai.agent import Agent
     from crewai.agents.agent_builder.base_agent import BaseAgent
     from crewai.llm import LLM
-    from crewai.llms.base_llm import BaseLLM
+    from crewai.llm.base_llm import BaseLLM

 _JSON_PATTERN: Final[re.Pattern[str]] = re.compile(r"({.*})", re.DOTALL)
 _I18N = get_i18n()

@@ -11,7 +11,7 @@ from rich.table import Table
 from crewai.agent import Agent
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.crew_events import CrewTestResultEvent
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput

@@ -10,7 +10,7 @@ from crewai.utilities.logger_utils import suppress_warnings
 if TYPE_CHECKING:
     from crewai.agent import Agent
     from crewai.llm import LLM
-    from crewai.llms.base_llm import BaseLLM
+    from crewai.llm.base_llm import BaseLLM
     from crewai.utilities.types import LLMMessage

@@ -4,7 +4,7 @@ from typing import Any, Final

 from crewai.cli.constants import DEFAULT_LLM_MODEL, ENV_VARS, LITELLM_PARAMS
 from crewai.llm import LLM
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM

 logger = logging.getLogger(__name__)

@@ -5,7 +5,7 @@ import logging
 from pydantic import BaseModel, Field

 from crewai.agent import Agent
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.task import Task

@@ -22,7 +22,7 @@ if TYPE_CHECKING:
     from crewai.agents.agent_builder.base_agent import BaseAgent
     from crewai.crew import Crew
     from crewai.llm import LLM
-    from crewai.llms.base_llm import BaseLLM
+    from crewai.llm.base_llm import BaseLLM
     from crewai.task import Task

@@ -14,7 +14,7 @@ from crewai.knowledge.knowledge_config import KnowledgeConfig
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
 from crewai.llm import LLM
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.process import Process
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage

@@ -2148,7 +2148,7 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
     mock_knowledge.query.assert_called_once()


-@pytest.mark.vcr(filter_headers=["authorization"])
+@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
 def test_agent_with_only_crewai_knowledge():
     mock_knowledge = MagicMock(spec=Knowledge)
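In VCR.py (as used through pytest's vcr marker), `record_mode="none"` makes the test strictly replay-only: recorded interactions in the cassette are reused, and any request without a match raises instead of hitting the network and re-recording, which is consistent with the stale cassette recordings deleted below.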
@@ -9,7 +9,7 @@ from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
 from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
 from crewai.lite_agent import LiteAgent
 from crewai.lite_agent_output import LiteAgentOutput
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from pydantic import BaseModel, Field
 import pytest
@@ -1,125 +0,0 @@
[deleted VCR cassette: a recorded POST to https://api.openai.com/v1/chat/completions for the "Test Agent" / "Say hello" fixture (gpt-4o-mini), including request headers, a gzip-encoded response body, cookie and rate-limit headers; the whole file is removed]
@@ -1,125 +0,0 @@
[deleted VCR cassette: a second, near-identical recording of the same "Say hello" chat-completions interaction against api.openai.com; the whole file is removed]
@@ -1,823 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "REDACTED_TRACE_ID", "execution_type": "crew", "user_identifier":
|
||||
null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name":
|
||||
null, "crewai_version": "1.4.1", "privacy_level": "standard"}, "execution_metadata":
|
||||
{"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count":
|
||||
0, "execution_started_at": "2025-11-15T19:58:54.275699+00:00"}, "ephemeral_trace_id":
|
||||
"REDACTED_EPHEMERAL_ID"}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '488'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.4.1
|
||||
X-Crewai-Organization-Id:
|
||||
- REDACTED_ORG_UUID
|
||||
X-Crewai-Version:
|
||||
- 1.4.1
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-15T19:58:54.413Z","updated_at":"2025-11-15T19:58:54.413Z","access_code":
|
||||
"REDACTED_ACCESS_CODE","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '515'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Sat, 15 Nov 2025 19:58:54 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"f189110ff0b9b1a9a6de911c8373b6cf"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- REDACTED_ORG_UUID
|
||||
x-runtime:
|
||||
- '0.050437'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 201
|
||||
message: Created
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
|
||||
is the expected criteria for your final answer: hello\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '768'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nV4x8blDTz1VuuyuoQHBYcUKwiqb2JDE4Hst2WtCq/x05
|
||||
7TZZWCQukTJv3vN7M/OUAQitRAlCthhl50x+L3d64z887I6fLW/D22b+KRZ3t18ePu7sQcwSg/ff
|
||||
ScZn1hvJnTMUNdszLD1hpKRabDfLxXKzXa4GoGNFJtEaF/MV5522Ol/MF6t8vs2Lmwu7ZS0piBK+
|
||||
ZgAAT8M3+bSKfooS5rPnSkchYEOivDYBCM8mVQSGoENEG8VsBCXbSHaw/h4sH0GihUYfCBCaZBvQ
|
||||
hiN5gG/2nbZo4Hb4L6ElY3gq5anuA6Y4tjdmAqC1HDGNYwjxeEFOV9uGG+d5H/6gilpbHdrKEwa2
|
||||
yWKI7MSAnjKAx2E8/YvEwnnuXKwi/6DhuWK9POuJcSsTdHEBI0c0k/pmPXtFr1IUUZswGbCQKFtS
|
||||
I3XcBvZK8wTIJqn/dvOa9jm5ts3/yI+AlOQiqcp5Ulq+TDy2eUpH+6+265QHwyKQP2hJVdTk0yYU
|
||||
1dib8ymJ8CtE6qpa24a88/p8T7Wr1oXa36ywxr3ITtlvAAAA//8DADWEgGFdAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99f15376386adf9a-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Sat, 15 Nov 2025 19:58:55 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=9N8QMgVR0T8m_LdeyT4oWCaQR47O2ACGkH9wXpfPKl8-1763236735-1.0.1.1-8xseH3YJzZo2ypKXBqE14SRYMqgQ1HSsW4ayyXXngCD66TFqO2xnfd9OqOA3mNh8hmoRXr9SGuLn84hiEL95_w_RQXvRFQ.JQb7mFThffN4;
|
||||
path=/; expires=Sat, 15-Nov-25 20:28:55 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=U_X_uM8Tk1B.1aiCr807RSOANcHTrF7LPQW1aUwSUCI-1763236735590-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1083'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1098'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999830'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999832'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_51e6f28672744e42b0cf17b175e98cad
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"events": [{"event_id": "REDACTED_EVENT_ID", "timestamp":
|
||||
"2025-11-15T19:58:54.274122+00:00", "type": "crew_kickoff_started", "event_data":
|
||||
{"timestamp": "2025-11-15T19:58:54.274122+00:00", "type": "crew_kickoff_started",
|
||||
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
|
||||
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
|
||||
"crew", "crew": null, "inputs": null}}, {"event_id": "REDACTED_EVENT_ID",
|
||||
"timestamp": "2025-11-15T19:58:54.276149+00:00", "type": "task_started", "event_data":
|
||||
{"task_description": "Say hello", "expected_output": "hello", "task_name": "Say
|
||||
hello", "context": "", "agent_role": "Test Agent", "task_id": "REDACTED_TASK_ID"}},
|
||||
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T19:58:54.277520+00:00",
|
||||
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
|
||||
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
|
||||
"6ab5ba71-81ef-4aea-800a-a4e332976b23", "timestamp": "2025-11-15T19:58:54.277708+00:00",
|
||||
"type": "llm_call_started", "event_data": {"timestamp": "2025-11-15T19:58:54.277708+00:00",
|
||||
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
|
||||
"fingerprint_metadata": null, "task_id": "REDACTED_TASK_ID",
|
||||
"task_name": "Say hello", "agent_id": "REDACTED_AGENT_ID",
|
||||
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "model":
|
||||
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
|
||||
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
|
||||
Say hello\n\nThis is the expected criteria for your final answer: hello\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
|
||||
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x10e737920>"],
|
||||
"available_functions": null}}, {"event_id": "REDACTED_EVENT_ID",
|
||||
"timestamp": "2025-11-15T19:58:55.617486+00:00", "type": "llm_call_completed",
|
||||
"event_data": {"timestamp": "2025-11-15T19:58:55.617486+00:00", "type": "llm_call_completed",
|
||||
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
|
||||
"task_id": "REDACTED_TASK_ID", "task_name": "Say hello",
|
||||
"agent_id": "REDACTED_AGENT_ID", "agent_role": "Test Agent",
|
||||
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
|
||||
"You are Test Agent. Test backstory\nYour personal goal is: Test goal\nTo give
|
||||
my best complete final answer to the task respond using the exact following
|
||||
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Say hello\n\nThis is the expected criteria for your
|
||||
final answer: hello\nyou MUST return the actual complete content as the final
|
||||
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
|
||||
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
|
||||
"response": "I now can give a great answer \nFinal Answer: hello", "call_type":
|
||||
"<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}}, {"event_id":
|
||||
"6da05ee3-40a0-44d3-9070-58f83e91fb02", "timestamp": "2025-11-15T19:58:55.617749+00:00",
|
||||
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
|
||||
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
|
||||
"323a901f-c31a-4937-aa83-99f80a195ec9", "timestamp": "2025-11-15T19:58:55.617956+00:00",
|
||||
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
|
||||
"Say hello", "task_id": "REDACTED_TASK_ID", "output_raw":
|
||||
"hello", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
|
||||
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T19:58:55.620199+00:00",
|
||||
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-11-15T19:58:55.620199+00:00",
|
||||
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
|
||||
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
|
||||
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
|
||||
"Say hello", "name": "Say hello", "expected_output": "hello", "summary": "Say
|
||||
hello...", "raw": "hello", "pydantic": null, "json_dict": null, "agent": "Test
|
||||
Agent", "output_format": "raw", "messages": [{"role": "''system''", "content":
|
||||
"''You are Test Agent. Test backstory\\nYour personal goal is: Test goal\\nTo
|
||||
give my best complete final answer to the task respond using the exact following
|
||||
format:\\n\\nThought: I now can give a great answer\\nFinal Answer: Your final
|
||||
answer must be the great and the most complete as possible, it must be outcome
|
||||
described.\\n\\nI MUST use these formats, my job depends on it!''"}, {"role":
|
||||
"''user''", "content": "''\\nCurrent Task: Say hello\\n\\nThis is the expected
|
||||
criteria for your final answer: hello\\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\\n\\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\\n\\nThought:''"}, {"role": "''assistant''", "content": "''I now can
|
||||
give a great answer \\nFinal Answer: hello''"}]}, "total_tokens": 165}}], "batch_metadata":
|
||||
{"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '6047'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.4.1
|
||||
X-Crewai-Organization-Id:
|
||||
- REDACTED_ORG_UUID
|
||||
X-Crewai-Version:
|
||||
- 1.4.1
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/events
|
||||
response:
|
||||
body:
|
||||
string: '{"events_created":8,"ephemeral_trace_batch_id": "REDACTED_BATCH_ID"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '86'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Sat, 15 Nov 2025 19:58:55 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"5763c4d7ea0188702ab3c06667edacb2"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- REDACTED_ORG_UUID
|
||||
x-runtime:
|
||||
- '0.085717'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"status": "completed", "duration_ms": 1545, "final_event_count": 8}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '68'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.4.1
|
||||
X-Crewai-Organization-Id:
|
||||
- REDACTED_ORG_UUID
|
||||
X-Crewai-Version:
|
||||
- 1.4.1
|
||||
method: PATCH
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/finalize
|
||||
response:
|
||||
body:
|
||||
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":1545,"crewai_version":"1.4.1","total_events":8,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"1.4.1","crew_fingerprint":null},"created_at":"2025-11-15T19:58:54.413Z","updated_at":"2025-11-15T19:58:55.963Z","access_code":
|
||||
"REDACTED_ACCESS_CODE","user_identifier":null}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '517'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Sat, 15 Nov 2025 19:58:55 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
etag:
|
||||
- W/"87272a0b299949ee15066ac5b6c288c8"
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- REDACTED_ORG_UUID
|
||||
x-runtime:
|
||||
- '0.040548'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: !!binary |
|
||||
Ct8QCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSthAKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKcCAoQnBgYneZ/2zN+PxfURVYEhxIIl8jmYkveFbEqDENyZXcgQ3JlYXRlZDABOSBG
|
||||
V8F3RngYQbD+XsF3RngYShkKDmNyZXdhaV92ZXJzaW9uEgcKBTEuNC4xShsKDnB5dGhvbl92ZXJz
|
||||
aW9uEgkKBzMuMTIuMTBKLgoIY3Jld19rZXkSIgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4Nzlk
|
||||
ZjNkMGVKMQoHY3Jld19pZBImCiRmNTFiYWY5YS0wOTliLTQ2ZjYtYTQxZS0zYjVkNTNmN2U3NzJK
|
||||
OgoQY3Jld19maW5nZXJwcmludBImCiRlYTU0MGVkMC1mMmQxLTQwNDQtOGI5Zi1hNjI0MmY1NGYx
|
||||
MjRKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNy
|
||||
ZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSjsKG2Ny
|
||||
ZXdfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTExLTE1VDE0OjU4OjU0LjI3MjkyMUrR
|
||||
AgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiMGMzZDYzYTY5MGUxM2Y1MTBkZTNjZDZkZmQz
|
||||
MTgxNmIiLCAiaWQiOiAiNTQ4YzlkOWMtN2M4OS00NTcwLTg2MzUtMTU3OTc0ZDc1M2JlIiwgInJv
|
||||
bGUiOiAiVGVzdCBBZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1h
|
||||
eF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8t
|
||||
bWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlv
|
||||
bj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEK
|
||||
CmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMTdjYzlhYjJiMmQwYmIwY2RkMzZkNTNlMDUyYmEz
|
||||
YTEiLCAiaWQiOiAiMGFjODNjNzktYmZiNS00MTc5LTk0NzAtMmI0OWIxNmUxM2I0IiwgImFzeW5j
|
||||
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6
|
||||
ICJUZXN0IEFnZW50IiwgImFnZW50X2tleSI6ICIwYzNkNjNhNjkwZTEzZjUxMGRlM2NkNmRmZDMx
|
||||
ODE2YiIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEpwEChA/Ny+I8Uec4bmw/hRH3QdM
|
||||
Egj4Fl8kb84nDCoMVGFzayBDcmVhdGVkMAE5yF54wXdGeBhBwAZ5wXdGeBhKLgoIY3Jld19rZXkS
|
||||
IgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4NzlkZjNkMGVKMQoHY3Jld19pZBImCiRmNTFiYWY5
|
||||
YS0wOTliLTQ2ZjYtYTQxZS0zYjVkNTNmN2U3NzJKOgoQY3Jld19maW5nZXJwcmludBImCiRlYTU0
|
||||
MGVkMC1mMmQxLTQwNDQtOGI5Zi1hNjI0MmY1NGYxMjRKLgoIdGFza19rZXkSIgogMTdjYzlhYjJi
|
||||
MmQwYmIwY2RkMzZkNTNlMDUyYmEzYTFKMQoHdGFza19pZBImCiQwYWM4M2M3OS1iZmI1LTQxNzkt
|
||||
OTQ3MC0yYjQ5YjE2ZTEzYjRKOgoQdGFza19maW5nZXJwcmludBImCiQ4NTBjZTAyMS1mYmMxLTRk
|
||||
MzEtYTA3Ny0xZDVmNjMzOWMyY2VKOwobdGFza19maW5nZXJwcmludF9jcmVhdGVkX2F0EhwKGjIw
|
||||
MjUtMTEtMTVUMTQ6NTg6NTQuMjcyODY4SjsKEWFnZW50X2ZpbmdlcnByaW50EiYKJDUzMWExMTg3
|
||||
LTZmOWEtNGNmMi1hYzMwLWUzZTczMWE4MzY5Y0oaCgphZ2VudF9yb2xlEgwKClRlc3QgQWdlbnR6
|
||||
AhgBhQEAAQAAEuEDChCrg6pKIgwTTkf7+bOsNaasEgjUfxiqLjY0BCoOVGFzayBFeGVjdXRpb24w
|
||||
ATlwPXnBd0Z4GEHg9nIReEZ4GEouCghjcmV3X2tleRIiCiBlNTlmNGE5NDUwMzI5MmFiODY1NWE4
|
||||
Nzg3OWRmM2QwZUoxCgdjcmV3X2lkEiYKJGY1MWJhZjlhLTA5OWItNDZmNi1hNDFlLTNiNWQ1M2Y3
|
||||
ZTc3Mko6ChBjcmV3X2ZpbmdlcnByaW50EiYKJGVhNTQwZWQwLWYyZDEtNDA0NC04YjlmLWE2MjQy
|
||||
ZjU0ZjEyNEouCgh0YXNrX2tleRIiCiAxN2NjOWFiMmIyZDBiYjBjZGQzNmQ1M2UwNTJiYTNhMUox
|
||||
Cgd0YXNrX2lkEiYKJDBhYzgzYzc5LWJmYjUtNDE3OS05NDcwLTJiNDliMTZlMTNiNEo7ChFhZ2Vu
|
||||
dF9maW5nZXJwcmludBImCiQ1MzFhMTE4Ny02ZjlhLTRjZjItYWMzMC1lM2U3MzFhODM2OWNKGgoK
|
||||
YWdlbnRfcm9sZRIMCgpUZXN0IEFnZW50SjoKEHRhc2tfZmluZ2VycHJpbnQSJgokODUwY2UwMjEt
|
||||
ZmJjMS00ZDMxLWEwNzctMWQ1ZjYzMzljMmNlegIYAYUBAAEAAA==
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '2146'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.38.0
|
||||
method: POST
|
||||
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
Date:
|
||||
- Sat, 15 Nov 2025 19:58:59 GMT
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
body: '{"events": [{"event_id": "REDACTED_EVENT_ID", "timestamp":
"2025-11-15T20:12:50.759077+00:00", "type": "crew_kickoff_started", "event_data":
{"timestamp": "2025-11-15T20:12:50.759077+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "inputs": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:12:50.761789+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello", "expected_output": "hello", "task_name": "Say
hello", "context": "", "agent_role": "Test Agent", "task_id": "REDACTED_TASK_ID"}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:12:50.762556+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"112efd06-87b7-4600-892f-3c96672571c6", "timestamp": "2025-11-15T20:12:50.762726+00:00",
"type": "llm_call_started", "event_data": {"timestamp": "2025-11-15T20:12:50.762726+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "REDACTED_TASK_ID",
"task_name": "Say hello", "agent_id": "REDACTED_AGENT_ID",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Say hello\n\nThis is the expected criteria for your final answer: hello\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x10e8b5b20>"],
"available_functions": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:12:50.877587+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T20:12:50.877587+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "REDACTED_TASK_ID", "task_name": "Say hello",
"agent_id": "REDACTED_AGENT_ID", "agent_role": "Test Agent",
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
"You are Test Agent. Test backstory\nYour personal goal is: Test goal\nTo give
my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Say hello\n\nThis is the expected criteria for your
final answer: hello\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"response": "I now can give a great answer \nFinal Answer: hello", "call_type":
"<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}}, {"event_id":
"430a26b3-c38b-4f75-8656-412124a6df95", "timestamp": "2025-11-15T20:12:50.877724+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"a76bbe00-1cc7-44a8-9ec3-c4ed8fca948d", "timestamp": "2025-11-15T20:12:50.877830+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "REDACTED_TASK_ID", "output_raw":
"hello", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:12:50.879327+00:00",
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-11-15T20:12:50.879327+00:00",
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
"Say hello", "name": "Say hello", "expected_output": "hello", "summary": "Say
hello...", "raw": "hello", "pydantic": null, "json_dict": null, "agent": "Test
Agent", "output_format": "raw", "messages": [{"role": "''system''", "content":
"''You are Test Agent. Test backstory\\nYour personal goal is: Test goal\\nTo
give my best complete final answer to the task respond using the exact following
format:\\n\\nThought: I now can give a great answer\\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\\n\\nI MUST use these formats, my job depends on it!''"}, {"role":
"''user''", "content": "''\\nCurrent Task: Say hello\\n\\nThis is the expected
criteria for your final answer: hello\\nyou MUST return the actual complete
content as the final answer, not a summary.\\n\\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\\n\\nThought:''"}, {"role": "''assistant''", "content": "''I now can
give a great answer \\nFinal Answer: hello''"}]}, "total_tokens": 165}}], "batch_metadata":
{"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6047'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_EPHEMERAL_ID/events
response:
body:
string: '{"error":"Couldn''t find EphemeralTraceBatch with [WHERE \"ephemeral_trace_batches\".\"ephemeral_trace_id\"
= $1]","message":"Trace batch not found"}'
headers:
Connection:
- keep-alive
Content-Length:
- '148'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:12:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 869cd156-577e-4f89-a822-0cd097bfb011
x-runtime:
- '0.038867'
x-xss-protection:
- 1; mode=block
status:
code: 404
message: Not Found
- request:
body: '{"status": "failed", "failure_reason": "Error sending events to backend"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '73'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches/REDACTED_EPHEMERAL_ID
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:12:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 1d74da02-f5f2-4bdc-8c9e-51bc9d3aff98
x-runtime:
- '0.046789'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
@@ -1,817 +0,0 @@
interactions:
- request:
body: '{"trace_id": "REDACTED_TRACE_ID", "execution_type": "crew", "user_identifier":
null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name":
null, "crewai_version": "1.4.1", "privacy_level": "standard"}, "execution_metadata":
{"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count":
0, "execution_started_at": "2025-11-15T20:00:40.213233+00:00"}, "ephemeral_trace_id":
"REDACTED_EPHEMERAL_ID"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-15T20:00:40.347Z","updated_at":"2025-11-15T20:00:40.347Z","access_code":
"REDACTED_ACCESS_CODE","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:00:40 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"1dad6ea33b1bd62ea816884d05ca0842"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.046518'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
is the expected criteria for your final answer: hello\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '768'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4XlV1zfggBtekt76yMQVtRKoktxCZJKWgT+94KU
YyttCuQiQDs7w5ndfcoAhKrFAYTsMMje6vxGfjzykXdfzZe7z7eh54Jub77dvZf9vqjEIjK4OpIM
z6x3knurKSg2IywdYaCoWlzt1qv1br9ZJqDnmnSktTbkG857ZVS+Wq42+fIqL/ZndsdKkhcH+J4B
ADylb/RpavolDpC0UqUn77Elcbg0AQjHOlYEeq98QBPEYgIlm0AmWf8Ehh9BooFWPRAgtNE2oPGP
5AB+mA/KoIbr9H+AjrTmuZSjZvAY45hB6xmAxnDAOI4U4v6MnC62NbfWceX/oopGGeW70hF6NtGi
D2xFQk8ZwH0az/AisbCOexvKwD8pPVds16OemLYyQ1dnMHBAPavvtotX9MqaAirtZwMWEmVH9USd
toFDrXgGZLPU/7p5TXtMrkz7FvkJkJJsoLq0jmolXyae2hzFo/1f22XKybDw5B6UpDIocnETNTU4
6PGUhP/tA/Vlo0xLzjo13lNjy21RV/sNNliJ7JT9AQAA//8DANqYTe5dAwAA
headers:
CF-RAY:
- 99f1560c3f5d4809-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 15 Nov 2025 20:00:41 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=h.tA2Rq1WhYqakfMp30WNbqx91S5jvXxlyjIW8bMhHY-1763236841-1.0.1.1-V.a.LzWhmsyvoXIFirG2pejIlbZ7BiLfwdlv6dDF.QddisjnkoYsgBPhVnxl.GwDFVDKymer1bQK_6vSoHBaQIcV4MJ7YayMl9lLs0.UcFM;
path=/; expires=Sat, 15-Nov-25 20:30:41 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=8Td_UnVGEcigZt.Nhy9rEFpaW9pgP0QJpdzFdEoktJk-1763236841097-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '563'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '666'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999832'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999832'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8e8e5bfc663840d68daf4ac70308eece
status:
code: 200
message: OK
- request:
body: '{"events": [{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:00:40.210936+00:00",
"type": "crew_kickoff_started", "event_data": {"timestamp": "2025-11-15T20:00:40.210936+00:00",
"type": "crew_kickoff_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "inputs": null}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:00:40.213519+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:00:40.213671+00:00", "type":
"llm_call_started", "event_data": {"timestamp": "2025-11-15T20:00:40.213671+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "REDACTED_TASK_ID", "task_name": "Say
hello", "agent_id": "REDACTED_AGENT_ID", "agent_role": "Test Agent", "from_task":
null, "from_agent": null, "model": "gpt-4o-mini", "messages": [{"role": "system",
"content": "You are Test Agent. Test backstory\nYour personal goal is: Test
goal\nTo give my best complete final answer to the task respond using the exact
following format:\n\nThought: I now can give a great answer\nFinal Answer: Your
final answer must be the great and the most complete as possible, it must be
outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role":
"user", "content": "\nCurrent Task: Say hello\n\nThis is the expected criteria
for your final answer: hello\nyou MUST return the actual complete content as
the final answer, not a summary.\n\nBegin! This is VERY important to you, use
the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x108cbb5f0>"], "available_functions": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:00:41.117164+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T20:00:41.117164+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "REDACTED_TASK_ID", "task_name": "Say hello", "agent_id": "REDACTED_AGENT_ID",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"}, {"role": "user", "content": "\nCurrent Task: Say hello\n\nThis is the
expected criteria for your final answer: hello\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "response": "I now can give a great answer \nFinal Answer:
hello", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}},
{"event_id": "1d32853b-04dd-49f1-9b0b-fca92a82ea0f", "timestamp": "2025-11-15T20:00:41.117412+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"3af2dbb3-6117-4df1-9dc8-3b4cbc1bb689", "timestamp": "2025-11-15T20:00:41.117869+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "REDACTED_TASK_ID", "output_raw": "hello", "output_format":
"OutputFormat.RAW", "agent_role": "Test Agent"}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:00:41.119050+00:00", "type": "crew_kickoff_completed",
"event_data": {"timestamp": "2025-11-15T20:00:41.119050+00:00", "type": "crew_kickoff_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "output": {"description": "Say hello", "name": "Say hello",
"expected_output": "hello", "summary": "Say hello...", "raw": "hello", "pydantic":
null, "json_dict": null, "agent": "Test Agent", "output_format": "raw", "messages":
[{"role": "''system''", "content": "''You are Test Agent. Test backstory\\nYour
personal goal is: Test goal\\nTo give my best complete final answer to the task
respond using the exact following format:\\n\\nThought: I now can give a great
answer\\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\\n\\nI MUST use these formats, my
job depends on it!''"}, {"role": "''user''", "content": "''\\nCurrent Task:
Say hello\\n\\nThis is the expected criteria for your final answer: hello\\nyou
MUST return the actual complete content as the final answer, not a summary.\\n\\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\\n\\nThought:''"}, {"role": "''assistant''",
"content": "''I now can give a great answer \\nFinal Answer: hello''"}]}, "total_tokens":
165}}], "batch_metadata": {"events_count": 7, "batch_sequence": 1, "is_final_batch":
false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '5723'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/events
response:
body:
string: '{"events_created":7,"ephemeral_trace_batch_id": "REDACTED_BATCH_ID"}'
headers:
Connection:
- keep-alive
Content-Length:
- '86'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:00:41 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"e539cd458f6386627ec23f6f6a46a996"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.062954'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: '{"status": "completed", "duration_ms": 1070, "final_event_count": 7}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '68'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/finalize
response:
body:
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":1070,"crewai_version":"1.4.1","total_events":7,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"1.4.1","crew_fingerprint":null},"created_at":"2025-11-15T20:00:40.347Z","updated_at":"2025-11-15T20:00:41.423Z","access_code":
"REDACTED_ACCESS_CODE","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '517'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:00:41 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"de9bcb107d0382f1b309276d8fc39196"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.045900'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: !!binary |
Ct8QCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSthAKEgoQY3Jld2FpLnRl
bGVtZXRyeRKcCAoQvXQY4SQ+2Mlfdsll/QHJghII0Bd15ezW7r4qDENyZXcgQ3JlYXRlZDABOShe
q2uQRngYQZDhtWuQRngYShkKDmNyZXdhaV92ZXJzaW9uEgcKBTEuNC4xShsKDnB5dGhvbl92ZXJz
aW9uEgkKBzMuMTIuMTBKLgoIY3Jld19rZXkSIgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4Nzlk
ZjNkMGVKMQoHY3Jld19pZBImCiQ2NWVkNDMyNS02NTE4LTRiMzUtOGQ3OS02NzA2ZDc5OTY0YWVK
OgoQY3Jld19maW5nZXJwcmludBImCiQ1MmM5ODNiOC02OTcwLTQ2ZmMtYmQ1YS0wY2MwNzY1M2Rk
NDhKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNy
ZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSjsKG2Ny
ZXdfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTExLTE1VDE1OjAwOjQwLjIwOTg4NUrR
AgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiMGMzZDYzYTY5MGUxM2Y1MTBkZTNjZDZkZmQz
MTgxNmIiLCAiaWQiOiAiYjE3OTNkNmYtN2Q4My00Y2YzLWE1NzQtNDE4ZGJkZWNmNzJmIiwgInJv
bGUiOiAiVGVzdCBBZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1h
eF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8t
bWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlv
bj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEK
CmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMTdjYzlhYjJiMmQwYmIwY2RkMzZkNTNlMDUyYmEz
YTEiLCAiaWQiOiAiOTUyY2ZmYzItNjVjNi00ZGMzLTk0MjItMjJiNjk0ZWJjNDU0IiwgImFzeW5j
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6
ICJUZXN0IEFnZW50IiwgImFnZW50X2tleSI6ICIwYzNkNjNhNjkwZTEzZjUxMGRlM2NkNmRmZDMx
ODE2YiIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEpwEChCNBcmqTbiktztgYNe6R2lF
EgiTrCx+R/HhAioMVGFzayBDcmVhdGVkMAE5uMi/a5BGeBhB+GTAa5BGeBhKLgoIY3Jld19rZXkS
IgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4NzlkZjNkMGVKMQoHY3Jld19pZBImCiQ2NWVkNDMy
NS02NTE4LTRiMzUtOGQ3OS02NzA2ZDc5OTY0YWVKOgoQY3Jld19maW5nZXJwcmludBImCiQ1MmM5
ODNiOC02OTcwLTQ2ZmMtYmQ1YS0wY2MwNzY1M2RkNDhKLgoIdGFza19rZXkSIgogMTdjYzlhYjJi
MmQwYmIwY2RkMzZkNTNlMDUyYmEzYTFKMQoHdGFza19pZBImCiQ5NTJjZmZjMi02NWM2LTRkYzMt
OTQyMi0yMmI2OTRlYmM0NTRKOgoQdGFza19maW5nZXJwcmludBImCiQyMTM3NzZkZC04MDMwLTQ1
ODYtYmI1MC02NjNiYjI0NjAwNWJKOwobdGFza19maW5nZXJwcmludF9jcmVhdGVkX2F0EhwKGjIw
MjUtMTEtMTVUMTU6MDA6NDAuMjA5ODQwSjsKEWFnZW50X2ZpbmdlcnByaW50EiYKJDVmMmJlOWQw
LTZiMjQtNDFiYy05YzQyLTI0ZjdlOTM3MjJjYkoaCgphZ2VudF9yb2xlEgwKClRlc3QgQWdlbnR6
AhgBhQEAAQAAEuEDChBC+bce4EVDxB/d79LFgX4NEghWvN23SKW/0SoOVGFzayBFeGVjdXRpb24w
ATnYk8BrkEZ4GEHI1LihkEZ4GEouCghjcmV3X2tleRIiCiBlNTlmNGE5NDUwMzI5MmFiODY1NWE4
Nzg3OWRmM2QwZUoxCgdjcmV3X2lkEiYKJDY1ZWQ0MzI1LTY1MTgtNGIzNS04ZDc5LTY3MDZkNzk5
NjRhZUo6ChBjcmV3X2ZpbmdlcnByaW50EiYKJDUyYzk4M2I4LTY5NzAtNDZmYy1iZDVhLTBjYzA3
NjUzZGQ0OEouCgh0YXNrX2tleRIiCiAxN2NjOWFiMmIyZDBiYjBjZGQzNmQ1M2UwNTJiYTNhMUox
Cgd0YXNrX2lkEiYKJDk1MmNmZmMyLTY1YzYtNGRjMy05NDIyLTIyYjY5NGViYzQ1NEo7ChFhZ2Vu
dF9maW5nZXJwcmludBImCiQ1ZjJiZTlkMC02YjI0LTQxYmMtOWM0Mi0yNGY3ZTkzNzIyY2JKGgoK
YWdlbnRfcm9sZRIMCgpUZXN0IEFnZW50SjoKEHRhc2tfZmluZ2VycHJpbnQSJgokMjEzNzc2ZGQt
ODAzMC00NTg2LWJiNTAtNjYzYmIyNDYwMDViegIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2146'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.38.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 15 Nov 2025 20:00:44 GMT
status:
code: 200
message: OK
- request:
body: '{"events": [{"event_id": "6a66ce15-fdb3-490b-a09b-7724817d0116", "timestamp":
"2025-11-15T20:15:51.057965+00:00", "type": "crew_kickoff_started", "event_data":
{"timestamp": "2025-11-15T20:15:51.057965+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "inputs": null}}, {"event_id": "15f2b75b-c7bb-48d1-8f61-faec2736da5d",
"timestamp": "2025-11-15T20:15:51.059954+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello", "expected_output": "hello", "task_name": "Say
hello", "context": "", "agent_role": "Test Agent", "task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61"}},
{"event_id": "eb90a87c-523c-40d6-b996-01706cbf8844", "timestamp": "2025-11-15T20:15:51.061205+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"862c2b07-d82a-4f02-9c99-519292679a87", "timestamp": "2025-11-15T20:15:51.061443+00:00",
"type": "llm_call_started", "event_data": {"timestamp": "2025-11-15T20:15:51.061443+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61",
"task_name": "Say hello", "agent_id": "82ee52ae-9eba-4648-877b-8cf2fc1624ae",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Say hello\n\nThis is the expected criteria for your final answer: hello\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x10d617f50>"],
"available_functions": null}}, {"event_id": "fff5720d-9167-48cf-9196-9ee96f765688",
"timestamp": "2025-11-15T20:15:51.175710+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T20:15:51.175710+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61", "task_name": "Say hello",
"agent_id": "82ee52ae-9eba-4648-877b-8cf2fc1624ae", "agent_role": "Test Agent",
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
"You are Test Agent. Test backstory\nYour personal goal is: Test goal\nTo give
my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Say hello\n\nThis is the expected criteria for your
final answer: hello\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"response": "I now can give a great answer \nFinal Answer: hello", "call_type":
"<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}}, {"event_id":
"1ce38e05-20f8-4f6b-b303-720dbcbb73b2", "timestamp": "2025-11-15T20:15:51.175899+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"dca0b4dd-dcfe-4002-9251-56cde6855f33", "timestamp": "2025-11-15T20:15:51.176016+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61", "output_raw":
"hello", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
{"event_id": "7e3993e7-e729-43a9-af63-b1429d0d2abc", "timestamp": "2025-11-15T20:15:51.177161+00:00",
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-11-15T20:15:51.177161+00:00",
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
"Say hello", "name": "Say hello", "expected_output": "hello", "summary": "Say
hello...", "raw": "hello", "pydantic": null, "json_dict": null, "agent": "Test
Agent", "output_format": "raw", "messages": [{"role": "''system''", "content":
"''You are Test Agent. Test backstory\\nYour personal goal is: Test goal\\nTo
give my best complete final answer to the task respond using the exact following
format:\\n\\nThought: I now can give a great answer\\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\\n\\nI MUST use these formats, my job depends on it!''"}, {"role":
"''user''", "content": "''\\nCurrent Task: Say hello\\n\\nThis is the expected
criteria for your final answer: hello\\nyou MUST return the actual complete
content as the final answer, not a summary.\\n\\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\\n\\nThought:''"}, {"role": "''assistant''", "content": "''I now can
give a great answer \\nFinal Answer: hello''"}]}, "total_tokens": 165}}], "batch_metadata":
{"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6047'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_EPHEMERAL_ID/events
response:
body:
string: '{"error":"Couldn''t find EphemeralTraceBatch with [WHERE \"ephemeral_trace_batches\".\"ephemeral_trace_id\"
= $1]","message":"Trace batch not found"}'
headers:
Connection:
- keep-alive
Content-Length:
- '148'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:15:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 255abbea-b49c-4dcc-ade5-3e16fd59277d
x-runtime:
- '0.050642'
x-xss-protection:
- 1; mode=block
status:
code: 404
message: Not Found
- request:
body: '{"status": "failed", "failure_reason": "Error sending events to backend"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '73'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches/REDACTED_EPHEMERAL_ID
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:15:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 7bbda7a6-5a8e-4dfc-bcef-fe9b8bff7532
x-runtime:
- '0.042800'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
@@ -1,30 +1,30 @@
 interactions:
 - request:
-    body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
+    body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
       personal goal is: Test goal\nTo give my best complete final answer to the task
       respond using the exact following format:\n\nThought: I now can give a great
       answer\nFinal Answer: Your final answer must be the great and the most complete
       as possible, it must be outcome described.\n\nI MUST use these formats, my job
-      depends on it!"},{"role":"user","content":"\nCurrent Task: Test task\n\nThis
+      depends on it!"}, {"role": "user", "content": "\nCurrent Task: Test task\n\nThis
       is the expected criteria for your final answer: test output\nyou MUST return
       the actual complete content as the final answer, not a summary.\n\nBegin! This
       is VERY important to you, use the tools available and give your best Final Answer,
-      your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
+      your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
     headers:
       accept:
       - application/json
       accept-encoding:
-      - gzip, deflate
+      - gzip, deflate, zstd
       connection:
       - keep-alive
       content-length:
-      - '774'
+      - '812'
       content-type:
       - application/json
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.109.1
+      - OpenAI/Python 1.93.0
       x-stainless-arch:
       - arm64
       x-stainless-async:
@@ -34,37 +34,33 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.109.1
+      - 1.93.0
       x-stainless-raw-response:
       - 'true'
       x-stainless-read-timeout:
-      - '600'
+      - '600.0'
       x-stainless-retry-count:
       - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.10
+      - 3.12.9
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
    body:
      string: !!binary |
-        H4sIAAAAAAAAAwAAAP//jFTBjhs3DL37K4i59DI2bHe9m/rWBCmQFkWLdlEgbQODK3FmlNWQU5Hj
-        2Y323wNpvGtvs4deBiM9PurxUdTnGUAVfLWFynVorh/i/I376X3bxHd//LJa/eZXt4F/bOjPn39d
-        /v72zb9VnRly95GcPbIWTvohkgXhCXaJ0ChnXd1cf7verDbr6wL04ilmWjvY/ErmfeAwXy/XV/Pl
-        zXz16sTuJDjSagt/zQAAPpdv1smeDtUWlvXjTk+q2FK1fQoCqJLEvFOhalBDtqo+g07YiIv0d8Dy
-        CRwytGFPgNBm2YCsnygB/M0/BMYI35f1Fm47AjoM5Iw8uBSMUkBoJIF1BE2JPXGDggkMSfbBE2R3
-        EnXEmo8J3EjqMZsFwoWrY7ETEsVsW+bmbSM1MNT7Bdx2QSGwi6On/DP3NFgHyBiPGrTOVNojG9AB
-        cy+0BiaX3UlH8GhYA7IHFwlTriIiFwkK1qGBQ6P0eG6x6GAgzSRBRhtGWxQDMPSn6oh1TDTRaU/p
-        CKjZnELL6lHvc6iTPaVcVCdJxraLx6xWx2iBWwiTA72oATUNOSutYH/2qayLrYOohrtIC3h9hEbc
-        qDnFZKJOPgsTm9Zft0Q7GaMHFgPheISeyCbzB3KhCZc9vRsNMKoAHRyRP3V98qsGT72wWsJSgIuY
-        gh1rGBK5oEH45PQ0EsSkJ4/R+0SqpE/2fKOQ6J8xJOqz6ucXJR4Xl/c2UTMq5tnhMcYLAJnlpC1P
-        zIcT8vA0I1HaIcmd/odaNYGDdrtEqMJ5HtRkqAr6MAP4UGZxfDZe1ZCkH2xnck/luNXmaspXnZ+A
-        C3T16oSaGMYzsL5Z1y8k3HkyDFEvxrly6DryZ+p59nH0QS6A2UXZX8t5KfdUeuD2/6Q/A87RYOR3
-        QyIf3POSz2GJPpan4uWwJ5uL4Eop7YOjnQVKuRWeGhzj9HBVelSjftcEbikNKUyvVzPsNtdLbK5p
-        s/mumj3MvgAAAP//AwAmD0HmywUAAA==
+        H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4WV+JHoVgR95NJD4UvaBgJDrSS2FJclV3bSwP9e
+        kHYsuU2BXghwZ2c4s8vnDEDoWpQgVCdZ9c7kNx+4v7vb7jafnrrPX25/vtObX48f1Q31m+uFmEUG
+        PXxHxS+sN4p6Z5A12QOsPErGqFqsl1fF4nJdzBPQU40m0lrH+YLyXludX8wvFvl8nRdXR3ZHWmEQ
+        JXxNAACe0xl92hofRQlJK1V6DEG2KMpTE4DwZGJFyBB0YGlZzEZQkWW0yfotWNqBkhZavUWQ0Ebb
+        IG3YoQf4Zt9rKw28TfcSNhgYaGA3nAl6bIYgYyg7GDMBpLXEMg4lRbk/IvuTeUOt8/QQ/qCKRlsd
+        usqjDGSj0cDkREL3GcB9GtJwlls4T73jiukHpueK5eKgJ8bdTNDLI8jE0kzqq/XsFb2qRpbahMmY
+        hZKqw3qkjjuRQ61pAmST1H+7eU37kFzb9n/kR0ApdIx15TzWWp0nHts8xq/7r7bTlJNhEdBvtcKK
+        Nfq4iRobOZjD/kV4Cox91WjbondeH35V46rlai6bFS6X1yLbZ78BAAD//wMAZdfoWWMDAAA=
     headers:
       CF-RAY:
-      - 99f2bc8f6f4dfab6-SJC
+      - 980b9e0c5fa516a0-SJC
       Connection:
       - keep-alive
       Content-Encoding:
@@ -72,14 +68,14 @@ interactions:
       Content-Type:
       - application/json
       Date:
-      - Sun, 16 Nov 2025 00:05:27 GMT
+      - Wed, 17 Sep 2025 21:15:11 GMT
       Server:
       - cloudflare
       Set-Cookie:
-      - __cf_bm=REDACTED;
-        path=/; expires=Sun, 16-Nov-25 00:35:27 GMT; domain=.api.openai.com; HttpOnly;
+      - __cf_bm=w6UZxbAZgYg9EFkKPfrSbMK97MB4jfs7YyvcEmgkvak-1758143711-1.0.1.1-j7YC1nvoMKxYK0T.5G2XDF6TXUCPu_HUs4YO9v65r3NHQFIcOaHbQXX4vqabSgynL2tZy23pbZgD8Cdmxhdw9dp4zkAXhU.imP43_pw4dSE;
+        path=/; expires=Wed, 17-Sep-25 21:45:11 GMT; domain=.api.openai.com; HttpOnly;
         Secure; SameSite=None
-      - _cfuvid=REDACTED;
+      - _cfuvid=ij9Q8tB7sj2GczANlJ7gbXVjj6hMhz1iVb6oGHuRYu8-1758143711202-0.0.1.1-604800000;
         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       Strict-Transport-Security:
       - max-age=31536000; includeSubDomains; preload
@@ -94,15 +90,15 @@ interactions:
       cf-cache-status:
       - DYNAMIC
       openai-organization:
-      - test-org
+      - crewai-iuxna1
       openai-processing-ms:
-      - '1493'
+      - '462'
       openai-project:
-      - proj_test123
+      - proj_xitITlrFeen7zjNSzML82h9x
       openai-version:
       - '2020-10-01'
       x-envoy-upstream-service-time:
-      - '1733'
+      - '665'
       x-openai-proxy-wasm:
       - v0.1
       x-ratelimit-limit-project-tokens:
@@ -112,11 +108,11 @@ interactions:
       x-ratelimit-limit-tokens:
      - '150000000'
       x-ratelimit-remaining-project-tokens:
-      - '149999832'
+      - '149999830'
       x-ratelimit-remaining-requests:
       - '29999'
       x-ratelimit-remaining-tokens:
-      - '149999832'
+      - '149999830'
       x-ratelimit-reset-project-tokens:
       - 0s
       x-ratelimit-reset-requests:
@@ -124,7 +120,7 @@ interactions:
       x-ratelimit-reset-tokens:
       - 0s
       x-request-id:
-      - req_test123
+      - req_04536db97c8c4768a200e38c1368c176
     status:
       code: 200
       message: OK

@@ -590,7 +590,7 @@ interactions:
     "<function BaseTool.<lambda> at 0x107389260>", "result_as_answer": "False",
     "max_usage_count": "None", "current_usage_count": "0"}], "max_iter": 2, "agent_executor":
     "<crewai.agents.crew_agent_executor.CrewAgentExecutor object at 0x130de6540>",
-    "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion object at
+    "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion object at
     0x130db6de0>", "crew": {"parent_flow": null, "name": "crew", "cache": true,
     "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0, ''i18n'':
     {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''', ''description'':
@@ -605,7 +605,7 @@ interactions:
     ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
     at 0x107389260>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
     0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x130de6540>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x130de6540>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x130db6de0>, ''crew'': Crew(id=991ac83f-9a29-411f-b0a0-0a335c7a2d0e,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -634,7 +634,7 @@ interactions:
     ''abc.Learn_About_Ai''>, ''description_updated'': False, ''cache_function'':
     <function BaseTool.<lambda> at 0x107389260>, ''result_as_answer'': False, ''max_usage_count'':
     None, ''current_usage_count'': 0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x130de6540>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x130de6540>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x130db6de0>, ''crew'': Crew(id=991ac83f-9a29-411f-b0a0-0a335c7a2d0e,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -657,7 +657,7 @@ interactions:
     {"fingerprint": {"metadata": "{}"}}, "callbacks": [], "adapted_agent": false,
     "knowledge_config": null, "max_execution_time": null, "agent_ops_agent_name":
     "test role", "agent_ops_agent_id": null, "step_callback": null, "use_system_prompt":
-    true, "function_calling_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    true, "function_calling_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x130db7020>", "system_template": null, "prompt_template": null, "response_template":
     null, "allow_code_execution": false, "respect_context_window": true, "max_retry_limit":
     2, "multimodal": false, "inject_date": false, "date_format": "%Y-%m-%d", "code_execution_mode":
@@ -1068,7 +1068,7 @@ interactions:
     "<function BaseTool.<lambda> at 0x107e394e0>", "result_as_answer": "False",
     "max_usage_count": "None", "current_usage_count": "0"}], "max_iter": 2, "agent_executor":
     "<crewai.agents.crew_agent_executor.CrewAgentExecutor object at 0x13b37c980>",
-    "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion object at
+    "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion object at
     0x13b7563c0>", "crew": {"parent_flow": null, "name": "crew", "cache": true,
     "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0, ''i18n'':
     {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''', ''description'':
@@ -1083,7 +1083,7 @@ interactions:
     ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
     at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
     0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13b37c980>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13b37c980>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13b7563c0>, ''crew'': Crew(id=f38365e9-3206-45b6-8754-950cb03fe57e,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1112,7 +1112,7 @@ interactions:
     ''abc.Learn_About_Ai''>, ''description_updated'': False, ''cache_function'':
     <function BaseTool.<lambda> at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'':
     None, ''current_usage_count'': 0}], ''max_iter'': 2, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13b37c980>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13b37c980>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13b7563c0>, ''crew'': Crew(id=f38365e9-3206-45b6-8754-950cb03fe57e,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1135,7 +1135,7 @@ interactions:
     {"fingerprint": {"metadata": "{}"}}, "callbacks": [], "adapted_agent": false,
     "knowledge_config": null, "max_execution_time": null, "agent_ops_agent_name":
     "test role", "agent_ops_agent_id": null, "step_callback": null, "use_system_prompt":
-    true, "function_calling_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    true, "function_calling_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13b756690>", "system_template": null, "prompt_template": null, "response_template":
     null, "allow_code_execution": false, "respect_context_window": true, "max_retry_limit":
     2, "multimodal": false, "inject_date": false, "date_format": "%Y-%m-%d", "code_execution_mode":

File diff suppressed because it is too large
@@ -1274,7 +1274,7 @@ interactions:
     "b6cf723e-04c8-40c5-a927-e2078cfbae59", "role": "test role", "goal": "test goal",
     "backstory": "test backstory", "cache": true, "verbose": true, "max_rpm": null,
     "allow_delegation": false, "tools": [], "max_iter": 6, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1285,7 +1285,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1309,7 +1309,7 @@ interactions:
     role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1491,7 +1491,7 @@ interactions:
     "goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
     true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
     "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
-    at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 2, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1502,7 +1502,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1528,7 +1528,7 @@ interactions:
     role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1660,7 +1660,7 @@ interactions:
     role", "goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
     true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
     "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
-    at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 3, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1671,7 +1671,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1698,7 +1698,7 @@ interactions:
     role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1839,7 +1839,7 @@ interactions:
     "goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
     true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
     "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
-    at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 4, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1850,7 +1850,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1879,7 +1879,7 @@ interactions:
     role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -2029,7 +2029,7 @@ interactions:
     "goal": "test goal", "backstory": "test backstory", "cache": true, "verbose":
     true, "max_rpm": null, "allow_delegation": false, "tools": [], "max_iter": 6,
     "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor object
-    at 0x13ab0abd0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    at 0x13ab0abd0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 5, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -2040,7 +2040,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -2070,7 +2070,7 @@ interactions:
     role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': None, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 6, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x13ab0abd0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x13ab0abd0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x13ab0b050>, ''crew'': Crew(id=004dd8a0-dd87-43fa-bdc8-07f449808028,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler

@@ -1082,7 +1082,7 @@ interactions:
     "role": "test role", "goal": "test goal", "backstory": "test backstory", "cache":
     true, "verbose": true, "max_rpm": 10, "allow_delegation": false, "tools": [],
     "max_iter": 4, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x133d41100>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x133d41100>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x133d40500>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1093,7 +1093,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x133d41100>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x133d41100>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x133d40500>, ''crew'': Crew(id=4c6d502e-f6ec-446a-8f76-644563c4aa94,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1117,7 +1117,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x133d41100>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x133d41100>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x133d40500>, ''crew'': Crew(id=4c6d502e-f6ec-446a-8f76-644563c4aa94,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1910,7 +1910,7 @@ interactions:
     "role": "test role", "goal": "test goal", "backstory": "test backstory", "cache":
     true, "verbose": true, "max_rpm": 10, "allow_delegation": false, "tools": [],
     "max_iter": 4, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10308d610>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10308d610>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x129201640>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1921,7 +1921,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10308d610>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10308d610>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x129201640>, ''crew'': Crew(id=1a07d718-fed5-49fa-bee2-de2db91c9f33,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1945,7 +1945,7 @@ interactions:
     ''test role'', ''goal'': ''test goal'', ''backstory'': ''test backstory'', ''cache'':
     True, ''verbose'': True, ''max_rpm'': 10, ''allow_delegation'': False, ''tools'':
     [], ''max_iter'': 4, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10308d610>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10308d610>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x129201640>, ''crew'': Crew(id=1a07d718-fed5-49fa-bee2-de2db91c9f33,
     process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler

@@ -1,4 +1,103 @@
 interactions:
+- request:
+    body: '{"trace_id": "9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055", "execution_type":
+      "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
+      "crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
+      "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
+      0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-06T15:58:15.778396+00:00"},
+      "ephemeral_trace_id": "9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055"}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate, zstd
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '488'
+      Content-Type:
+      - application/json
+      User-Agent:
+      - CrewAI-CLI/1.3.0
+      X-Crewai-Version:
+      - 1.3.0
+    method: POST
+    uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
+  response:
+    body:
+      string: '{"id":"f303021e-f1a0-4fd8-9c7d-8ba6779f8ad3","ephemeral_trace_id":"9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-06T15:58:16.189Z","updated_at":"2025-11-06T15:58:16.189Z","access_code":"TRACE-c2990cd4d4","user_identifier":null}'
+    headers:
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '515'
+      Content-Type:
+      - application/json; charset=utf-8
+      Date:
+      - Thu, 06 Nov 2025 15:58:16 GMT
+      cache-control:
+      - no-store
+      content-security-policy:
+      - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
+        ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
+        https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
+        https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
+        https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
+        https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
+        https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
+        https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
+        https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
+        https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
+        https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
+        https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
+        https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
+        app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
+        *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
+        https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
+        https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
+        https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
+        connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
+        https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
+        https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
+        https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
+        https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
+        https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
+        https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
+        https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
+        *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
+        https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
+        https://drive.google.com https://slides.google.com https://accounts.google.com
+        https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
+        https://www.youtube.com https://share.descript.com'
+      etag:
+      - W/"8df0b730688b8bc094b74c66a6293578"
+      expires:
+      - '0'
+      permissions-policy:
+      - camera=(), microphone=(self), geolocation=()
+      pragma:
+      - no-cache
+      referrer-policy:
+      - strict-origin-when-cross-origin
+      strict-transport-security:
+      - max-age=63072000; includeSubDomains
+      vary:
+      - Accept
+      x-content-type-options:
+      - nosniff
+      x-frame-options:
+      - SAMEORIGIN
+      x-permitted-cross-domain-policies:
+      - none
+      x-request-id:
+      - 38352441-7508-4e1e-9bff-77d1689dffdf
+      x-runtime:
+      - '0.085540'
+      x-xss-protection:
+      - 1; mode=block
+    status:
+      code: 201
+      message: Created
 - request:
     body: '{"messages":[{"role":"system","content":"Your goal is to rewrite the user
       query so that it is optimized for retrieval from a vector database. Consider
@@ -16,7 +115,7 @@ interactions:
       accept:
       - application/json
       accept-encoding:
-      - gzip, deflate
+      - gzip, deflate, zstd
       connection:
       - keep-alive
       content-length:
@@ -44,23 +143,23 @@ interactions:
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.10
+      - 3.12.9
     method: POST
     uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
-        H4sIAAAAAAAAAwAAAP//jFLBTtwwFLznKyxfetmg3YXspnutCmpVIS70UqHI2C/JK46fZb+sQGj/
-        HTlZNqGA1IsPnjfjmfF7zoSQaOROSN0q1p23+Te9rh8vr67tj+99Wdw8NDc/Xdy32KvbX71cJAbd
-        /wXNr6wzTZ23wEhuhHUAxZBUV9vN+apcb8tiADoyYBOt8ZxfUN6hw3y9XF/ky22+Ko/sllBDlDvx
-        JxNCiOfhTD6dgUe5E8vF600HMaoG5O40JIQMZNONVDFiZOVYLiZQk2Nwg/XfaJC/RFGrPQVkEJos
-        hbP5dIC6jyo5dr21M0A5R6xS4sHn3RE5nJxZanyg+/gPVdboMLZVABXJJReRycsBPWRC3A0N9G9C
-        SR+o81wxPcDw3Gp7PurJqfgJLY4YEys7J5WLD+QqA6zQxlmFUivdgpmoU9+qN0gzIJuFfm/mI+0x
-        OLrmf+QnQGvwDKbyAQzqt4GnsQBpLT8bO5U8GJYRwh41VIwQ0kcYqFVvx2WR8SkydFWNroHgA44b
-        U/uq2CxVvYGi+CqzQ/YCAAD//wMAZMa5Sz8DAAA=
+        H4sIAAAAAAAAAwAAAP//jFJBbtswELzrFQQvvViBLMuy42sObYEWKIoiQFMEAkOu5G0oLkGu0xaB
+        /15QciwlTYFceODsDGeG+5gJIdHInZB6r1j33uZX33+1H78y9tvVt+Lmpv68KrfXX96Xnz7cu61c
+        JAbd/QTNT6wLTb23wEhuhHUAxZBUl5u6rKqqvqwHoCcDNtE6z3lFeY8O87Ioq7zY5MuTuN4Taohy
+        J35kQgjxOJzJpzPwW+5EsXi66SFG1YHcnYeEkIFsupEqRoysHMvFBGpyDG6wfo0G+V0UrXqggAxC
+        k6VwMZ8O0B6iSo7dwdoZoJwjVinx4PP2hBzPzix1PtBdfEGVLTqM+yaAiuSSi8jk5YAeMyFuhwYO
+        z0JJH6j33DDdw/DccrMa9eRU/ISuTxgTKzsnbRevyDUGWKGNswqlVnoPZqJOfauDQZoB2Sz0v2Ze
+        0x6Do+veIj8BWoNnMI0PYFA/DzyNBUhr+b+xc8mDYRkhPKCGhhFC+ggDrTrYcVlk/BMZ+qZF10Hw
+        AceNaX2zrgvV1rBeX8rsmP0FAAD//wMA5SIzeT8DAAA=
     headers:
       CF-RAY:
-      - 99ec2e536dcc3c7d-SJC
+      - 99a5ca96bb1443e7-EWR
       Connection:
       - keep-alive
       Content-Encoding:
@@ -68,12 +167,12 @@ interactions:
       Content-Type:
      - application/json
       Date:
-      - Sat, 15 Nov 2025 04:59:45 GMT
+      - Thu, 06 Nov 2025 15:58:16 GMT
       Server:
       - cloudflare
       Set-Cookie:
       - __cf_bm=REDACTED;
-        path=/; expires=Sat, 15-Nov-25 05:29:45 GMT; domain=.api.openai.com; HttpOnly;
+        path=/; expires=Thu, 06-Nov-25 16:28:16 GMT; domain=.api.openai.com; HttpOnly;
         Secure; SameSite=None
       - _cfuvid=REDACTED;
         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
@@ -90,37 +189,31 @@ interactions:
       cf-cache-status:
       - DYNAMIC
       openai-organization:
-      - REDACTED_ORG
+      - user-REDACTED
       openai-processing-ms:
-      - '418'
+      - '235'
       openai-project:
-      - REDACTED_PROJECT
+      - proj_REDACTED
       openai-version:
       - '2020-10-01'
       x-envoy-upstream-service-time:
-      - '434'
+      - '420'
       x-openai-proxy-wasm:
       - v0.1
-      x-ratelimit-limit-project-tokens:
-      - '150000000'
       x-ratelimit-limit-requests:
-      - '30000'
+      - '10000'
       x-ratelimit-limit-tokens:
-      - '150000000'
-      x-ratelimit-remaining-project-tokens:
-      - '149999785'
+      - '200000'
       x-ratelimit-remaining-requests:
-      - '29999'
+      - '9999'
       x-ratelimit-remaining-tokens:
-      - '149999785'
-      x-ratelimit-reset-project-tokens:
-      - 0s
+      - '199785'
       x-ratelimit-reset-requests:
-      - 2ms
+      - 8.64s
       x-ratelimit-reset-tokens:
-      - 0s
+      - 64ms
       x-request-id:
-      - REDACTED_REQUEST_ID
+      - req_9810e9721aa9463c930414ab5174ab61
     status:
       code: 200
       message: OK
@@ -140,7 +233,7 @@ interactions:
       accept:
      - application/json
       accept-encoding:
-      - gzip, deflate
+      - gzip, deflate, zstd
       connection:
       - keep-alive
       content-length:
@@ -171,26 +264,25 @@ interactions:
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.10
+      - 3.12.9
     method: POST
     uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
-        H4sIAAAAAAAAAwAAAP//jFNNbxNBDL3nV1hz4bKp8tGkITdEBVRC4oLgAFXkzHg3prP2aGY2aaj6
-        39Fu0mxaisRlpfXze7bHzw8DAMPOLMHYDWZbBz98byfl/bW9mcrH69GX37Kd8v6z/X63Ubz/aoqW
-        oevfZPMT68JqHTxlVjnANhJmalXHV/PpeDG5Wsw6oFZHvqVVIQ8vdViz8HAymlwOR1fD8eLI3ihb
-        SmYJPwYAAA/dt+1THN2bJYyKp0hNKWFFZnlKAjBRfRsxmBKnjJJN0YNWJZN0rd+A6A4sClS8JUCo
-        2rYBJe0oAvyUDyzo4V33v4Rv7Di/SVDiViNnAqteI3AC0QyhWXu2fg9ObVOTZHKACTh3BbYY97DG
-        RA5UIFBM2kqHSCVFEkvpAj7pjrYUC7Ba1yov6iTAWqUCFsdbdg36BFpmEmCxvnEEa99Q0c5AUgCK
-        g0iugHWTIStYlZJjfRoiBbJcsn1RpQAVgp023oEQuSM1NT4DQiTPuPYESZtoCTSC40g2+z1guoMN
-        1xfnbx2pbBK2+5bG+zMARTRj65duy7dH5PG0V69ViLpOL6imZOG0WUXCpNLuMGUNpkMfBwC3nX+a
-        Z5YwIWod8irrHXXlxvPFQc/0tu3R+fwIZs3o+/hkelm8ordylJF9OnOgsWg35Hpqb1dsHOsZMDib
-        +u9uXtM+TM5S/Y98D1hLIZNbhUiO7fOJ+7RI7VX/K+30yl3DJlHcsqVVZortJhyV2PjDrZm0T5nq
-        VclSUQyRDwdXhtVsPsJyTrPZWzN4HPwBAAD//wMAtb7X3X4EAAA=
+        H4sIAAAAAAAAAwAAAP//jFPBahsxEL37KwZderGN7dqO41vaUghtT4FCacIiS7PrSbQaVZq1swT/
+        e9HayTptCr0ItG/e6M2b0dMAQJFVa1Bmq8XUwY0+/tiXXy4/2BDN/p7izdY4/vrp29Wvmxbv1TAz
+        eHOPRp5ZY8N1cCjE/gibiFowV51eLGfz+Xx5ueqAmi26TKuCjOY8qsnTaDaZzUeTi9F0dWJvmQwm
+        tYafAwCAp+7MOr3FR7WGyfA5UmNKukK1fkkCUJFdjiidEiXRXtSwBw17Qd9JvwbPezDaQ0U7BA1V
+        lg3apz1GgFv/mbx2cNXd1/CdLMm7BKXecSRBMOw4AiXwLBCajSPjWrBsmhq9oAWOsCeLroUHz3s/
+        husSWm5gq3cIKaChkgx0ih4lZ1sUTS6B3nAjxweHcA21bmGDoDcOQRhC5B3ZLLjmiJApHNFCxBTY
+        Jxyf9xuxbJLOnvvGuTNAe8+i88w6p+9OyOHFW8dViLxJf1BVSZ7StoioE/vsYxIOqkMPA4C7bobN
+        q7GoELkOUgg/YPfcdLk61lP96vTofHEChUW7Pj6bvh++Ua842Xa2Bcpos0XbU/uV0Y0lPgMGZ13/
+        reat2sfOyVf/U74HjMEgaIsQ0ZJ53XGfFjH/rH+lvbjcCVYJ444MFkIY8yQslrpxx31XqU2CdVGS
+        rzCGSMelL0OxWE50ucTF4lINDoPfAAAA//8DAPFGfbMCBAAA
     headers:
       CF-RAY:
-      - 99ec2e59baca3c7d-SJC
+      - 99a5ca9c5ef543e7-EWR
       Connection:
       - keep-alive
       Content-Encoding:
@@ -198,7 +290,7 @@ interactions:
       Content-Type:
       - application/json
       Date:
-      - Sat, 15 Nov 2025 04:59:47 GMT
+      - Thu, 06 Nov 2025 15:58:19 GMT
       Server:
       - cloudflare
       Strict-Transport-Security:
@@ -214,37 +306,31 @@ interactions:
       cf-cache-status:
       - DYNAMIC
       openai-organization:
-      - REDACTED_ORG
+      - user-REDACTED
       openai-processing-ms:
-      - '1471'
+      - '1326'
       openai-project:
-      - REDACTED_PROJECT
+      - proj_REDACTED
       openai-version:
       - '2020-10-01'
       x-envoy-upstream-service-time:
-      - '1488'
+      - '1754'
       x-openai-proxy-wasm:
       - v0.1
-      x-ratelimit-limit-project-tokens:
-      - '150000000'
       x-ratelimit-limit-requests:
-      - '30000'
+      - '10000'
       x-ratelimit-limit-tokens:
-      - '150000000'
-      x-ratelimit-remaining-project-tokens:
-      - '149999805'
+      - '200000'
       x-ratelimit-remaining-requests:
-      - '29999'
+      - '9998'
       x-ratelimit-remaining-tokens:
-      - '149999802'
-      x-ratelimit-reset-project-tokens:
-      - 0s
+      - '199803'
       x-ratelimit-reset-requests:
-      - 2ms
+      - 15.913s
       x-ratelimit-reset-tokens:
-      - 0s
+      - 59ms
       x-request-id:
-      - REDACTED_REQUEST_ID
+      - req_f975e16b666e498b8bcfdfab525f71b3
     status:
       code: 200
       message: OK

@@ -937,7 +937,7 @@ interactions:
     "description_updated": "False", "cache_function": "<function BaseTool.<lambda>
     at 0x10614d3a0>", "result_as_answer": "False", "max_usage_count": "None", "current_usage_count":
     "0"}], "max_iter": 25, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10f6c3bc0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10f6c3bc0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x10f6c27e0>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -977,7 +977,7 @@ interactions:
     ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
     at 0x10614d3a0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
     0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10f6c3bc0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10f6c3bc0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x10f6c27e0>, ''crew'': Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -996,7 +996,7 @@ interactions:
     ''First Agent'', ''goal'': ''First goal'', ''backstory'': ''First backstory'',
     ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
     False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x11059ca10>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x11059ca10>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x10f6e6ae0>, ''crew'': Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1007,7 +1007,7 @@ interactions:
     ''role'': ''Second Agent'', ''goal'': ''Second goal'', ''backstory'': ''Second
     backstory'', ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
     False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10f6c3500>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10f6c3500>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x10f6d2000>, ''crew'': Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1017,7 +1017,7 @@ interactions:
     False, ''knowledge_config'': None}"], "process": "hierarchical", "verbose":
     false, "memory": false, "short_term_memory": null, "long_term_memory": null,
     "entity_memory": null, "external_memory": null, "embedder": null, "usage_metrics":
-    null, "manager_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    null, "manager_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x10f6c27e0>", "manager_agent": {"id": "UUID(''b0898472-5e3b-45bb-bd90-05bad0b5a8ce'')",
     "role": "''Crew Manager''", "goal": "''Manage the team to complete the task
     in the best way possible.''", "backstory": "\"You are a seasoned manager with
@@ -1053,7 +1053,7 @@ interactions:
     ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
     at 0x10614d3a0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
     0}]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x10f6c3bc0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x10f6c3bc0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x10f6c27e0>", "crew": "Crew(id=49cbb747-f055-4636-bbca-9e8a450c05f6,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1)", "i18n":
     "{''prompt_file'': None}", "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
@@ -1805,7 +1805,7 @@ interactions:
     "description_updated": "False", "cache_function": "<function BaseTool.<lambda>
     at 0x107e394e0>", "result_as_answer": "False", "max_usage_count": "None", "current_usage_count":
     "0"}], "max_iter": 25, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x1388bedb0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x1388bedb0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x1388bf710>", "crew": {"parent_flow": null, "name": "crew", "cache":
     true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
     ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -1845,7 +1845,7 @@ interactions:
     ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
     at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
     0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x1388bedb0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x1388bedb0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x1388bf710>, ''crew'': Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1864,7 +1864,7 @@ interactions:
     ''First Agent'', ''goal'': ''First goal'', ''backstory'': ''First backstory'',
     ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
     False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x1388d5c70>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x1388d5c70>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x1388bde80>, ''crew'': Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1875,7 +1875,7 @@ interactions:
     ''role'': ''Second Agent'', ''goal'': ''Second goal'', ''backstory'': ''Second
     backstory'', ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
     False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x1388bf7d0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x1388bf7d0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x1388bfb90>, ''crew'': Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1), ''i18n'':
     {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -1885,7 +1885,7 @@ interactions:
     False, ''knowledge_config'': None}"], "process": "hierarchical", "verbose":
     false, "memory": false, "short_term_memory": null, "long_term_memory": null,
     "entity_memory": null, "external_memory": null, "embedder": null, "usage_metrics":
-    null, "manager_llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    null, "manager_llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x1388bf710>", "manager_agent": {"id": "UUID(''09794b42-447f-4b7a-b634-3a861f457357'')",
     "role": "''Crew Manager''", "goal": "''Manage the team to complete the task
     in the best way possible.''", "backstory": "\"You are a seasoned manager with
@@ -1921,7 +1921,7 @@ interactions:
     ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
     at 0x107e394e0>, ''result_as_answer'': False, ''max_usage_count'': None, ''current_usage_count'':
     0}]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
-    object at 0x1388bedb0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
+    object at 0x1388bedb0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
     object at 0x1388bf710>", "crew": "Crew(id=4d744f3e-0589-4d1d-b1c1-6aa8b52478ac,
     process=Process.hierarchical, number_of_agents=2, number_of_tasks=1)", "i18n":
     "{''prompt_file'': None}", "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler

@@ -1,104 +1,7 @@
interactions:
- request:
    body: '{"trace_id": "3fe0e5a3-1d9c-4604-b3a7-2cd3f16e95f9", "execution_type":
      "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
      "crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
      "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
      0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-15T04:57:05.245294+00:00"}}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '434'
      Content-Type:
      - application/json
      User-Agent:
      - CrewAI-CLI/1.4.1
      X-Crewai-Organization-Id:
      - 73c2b193-f579-422c-84c7-76a39a1da77f
      X-Crewai-Version:
      - 1.4.1
    method: POST
    uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
  response:
    body:
      string: '{"error":"bad_credentials","message":"Bad credentials"}'
    headers:
      Connection:
      - keep-alive
      Content-Length:
      - '55'
      Content-Type:
      - application/json; charset=utf-8
      Date:
      - Sat, 15 Nov 2025 04:57:05 GMT
      cache-control:
      - no-store
      content-security-policy:
      - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
        ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
        https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
        https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
        https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
        https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
        https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
        https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
        https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
        https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
        https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
        https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
        https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
        app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
        *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
        https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
        https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
        https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
        connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
        https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
        https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
        https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
        https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
        https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
        https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
        https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
        *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
        https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
        https://drive.google.com https://slides.google.com https://accounts.google.com
        https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
        https://www.youtube.com https://share.descript.com'
      expires:
      - '0'
      permissions-policy:
      - camera=(), microphone=(self), geolocation=()
      pragma:
      - no-cache
      referrer-policy:
      - strict-origin-when-cross-origin
      strict-transport-security:
      - max-age=63072000; includeSubDomains
      vary:
      - Accept
      x-content-type-options:
      - nosniff
      x-frame-options:
      - SAMEORIGIN
      x-permitted-cross-domain-policies:
      - none
      x-request-id:
      - 98dde4ab-199c-4d1c-a059-3d8b9c0c93d3
      x-runtime:
      - '0.037564'
      x-xss-protection:
      - 1; mode=block
    status:
      code: 401
      message: Unauthorized
- request:
    body: '{"messages":[{"role":"user","content":"Say ''Hello, World!''"}],"model":"gpt-3.5-turbo"}'
    body: '{"messages": [{"role": "user", "content": "Say ''Hello, World!''"}], "model":
      "gpt-3.5-turbo"}'
    headers:
      accept:
      - application/json
@@ -107,13 +10,16 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '86'
      - '92'
      content-type:
      - application/json
      cookie:
      - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      - OpenAI/Python 1.47.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
@@ -123,31 +29,29 @@ interactions:
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.109.1
      x-stainless-read-timeout:
      - '600'
      x-stainless-retry-count:
      - '0'
      - 1.47.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jJJNaxsxEIbv+yvUOa+NP2q78TUQegihTQmGFrPI0nitVKtRpdm2Ifi/
        F8kfu24SyEUHPfOO3nc0z4UQYDQsBaidZNV4O7hWE31n9Rdz9TCaXd9//dPcLlZhdf999OvmG5RJ
        QZtHVHxSDRU13iIbcgesAkrG1HW8mE/HnybzySyDhjTaJKs9D6bD2YDbsKHBaDyZHZU7MgojLMWP
        QgghnvOZPDqNf2EpRuXppsEYZY2wPBcJAYFsugEZo4ksHUPZQUWO0WXbn9FaKsWKgtUf+jUBt22U
        yaNrre0B6RyxTBmzu/WR7M9+LNU+0Cb+J4WtcSbuqoAykktvRyYPme4LIdY5d3sRBXygxnPF9BPz
        c+PpoR10k+7gxyNjYml7mkX5SrNKI0tjY29soKTaoe6U3Yxlqw31QNGL/NLLa70PsY2r39O+A0qh
        Z9SVD6iNuszblQVMa/hW2XnE2TBEDL+NwooNhvQNGreytYcFgfgUGZtqa1yNwQeTtyR9Y7Ev/gEA
        AP//AwAqA1omJAMAAA==
    content: "{\n \"id\": \"chatcmpl-AB7WOl4G3lFflxNyRE5fAnkueUNWp\",\n \"object\":
      \"chat.completion\",\n \"created\": 1727213884,\n \"model\": \"gpt-3.5-turbo-0125\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"Hello, World!\",\n \"refusal\":
      null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
      \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\":
      4,\n \"total_tokens\": 17,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
      0\n }\n },\n \"system_fingerprint\": null\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 99ec2a70de42f9e4-SJC
      - 8c85eb570b271cf3-GRU
      Connection:
      - keep-alive
      Content-Encoding:
@@ -155,39 +59,23 @@ interactions:
      Content-Type:
      - application/json
      Date:
      - Sat, 15 Nov 2025 04:57:05 GMT
      - Tue, 24 Sep 2024 21:38:04 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=REDACTED;
        path=/; expires=Sat, 15-Nov-25 05:27:05 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=REDACTED;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - REDACTED_ORG
      - crewai-iuxna1
      openai-processing-ms:
      - '162'
      openai-project:
      - REDACTED_PROJECT
      - '170'
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '183'
      x-openai-proxy-wasm:
      - v0.1
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
@@ -195,14 +83,93 @@ interactions:
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '49999993'
      - '49999978'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - REDACTED_REQUEST_ID
      - req_c504d56aee4210a9911e1b90551f1e46
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"trace_id": "9d3dfee1-ebe8-4eb3-aa28-e77448706cb5", "execution_type":
      "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
      "crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "0.193.2",
      "privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate":
      300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at":
      "2025-09-24T05:36:10.874552+00:00"}}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '436'
      Content-Type:
      - application/json
      User-Agent:
      - CrewAI-CLI/0.193.2
      X-Crewai-Organization-Id:
      - d3a3d10c-35db-423f-a7a4-c026030ba64d
      X-Crewai-Version:
      - 0.193.2
    method: POST
    uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
  response:
    body:
      string: '{"id":"bc65d267-2f55-4edd-9277-61486245c5f6","trace_id":"9d3dfee1-ebe8-4eb3-aa28-e77448706cb5","execution_type":"crew","crew_name":"Unknown
        Crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"0.193.2","privacy_level":"standard","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"Unknown
        Crew","flow_name":null,"crewai_version":"0.193.2","privacy_level":"standard"},"created_at":"2025-09-24T05:36:11.292Z","updated_at":"2025-09-24T05:36:11.292Z"}'
    headers:
      Content-Length:
      - '496'
      cache-control:
      - max-age=0, private, must-revalidate
      content-security-policy:
      - 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
        *.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
        https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
        *.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
        data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
        https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
        connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
        https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
        https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
        wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
        https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
        https://www.youtube.com https://share.descript.com'
      content-type:
      - application/json; charset=utf-8
      etag:
      - W/"43353f343ab1e228123d1a9c9a4b6e7c"
      permissions-policy:
      - camera=(), microphone=(self), geolocation=()
      referrer-policy:
      - strict-origin-when-cross-origin
      server-timing:
      - cache_read.active_support;dur=0.09, cache_fetch_hit.active_support;dur=0.00,
        cache_read_multi.active_support;dur=0.08, start_processing.action_controller;dur=0.00,
        sql.active_record;dur=24.53, instantiation.active_record;dur=1.01, feature_operation.flipper;dur=0.07,
        start_transaction.active_record;dur=0.02, transaction.active_record;dur=24.66,
        process_action.action_controller;dur=399.97
      vary:
      - Accept
      x-content-type-options:
      - nosniff
      x-frame-options:
      - SAMEORIGIN
      x-permitted-cross-domain-policies:
      - none
      x-request-id:
      - 256ac03e-f7ae-4e03-b5e0-31bd179a7afc
      x-runtime:
      - '0.422765'
      x-xss-protection:
      - 1; mode=block
    status:
      code: 200
      message: OK
      code: 201
      message: Created
version: 1

@@ -81,9 +81,11 @@ interactions:
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:59 GMT; domain=.api.openai.com;
        HttpOnly; Secure; SameSite=None
      - _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - __cf_bm=REDACTED;
        path=/; expires=Wed, 05-Nov-25 22:40:59 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=REDACTED;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
@@ -125,105 +127,4 @@ interactions:
    status:
      code: 200
      message: OK
- request:
    body: '{"trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd", "execution_type":
      "crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
      "crew_name": "crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
      "standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
      0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-15T21:20:09.431751+00:00"},
      "ephemeral_trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd"}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '488'
      Content-Type:
      - application/json
      User-Agent:
      - CrewAI-CLI/1.4.1
      X-Crewai-Organization-Id:
      - 73c2b193-f579-422c-84c7-76a39a1da77f
      X-Crewai-Version:
      - 1.4.1
    method: POST
    uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
  response:
    body:
      string: '{"id":"25f0f0b3-90bb-4e2a-bde5-817920201bf1","ephemeral_trace_id":"c682f49d-bb6b-49d9-84b7-06e1881d37cd","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-15T21:20:09.594Z","updated_at":"2025-11-15T21:20:09.594Z","access_code":"TRACE-1fb0209738","user_identifier":null}'
    headers:
      Connection:
      - keep-alive
      Content-Length:
      - '515'
      Content-Type:
      - application/json; charset=utf-8
      Date:
      - Sat, 15 Nov 2025 21:20:09 GMT
      cache-control:
      - no-store
      content-security-policy:
      - 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
        ''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
        https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
        https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
        https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
        https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
        https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
        https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
        https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
        https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
        https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
        https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
        https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
        app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
        *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
        https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
        https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
        https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
        connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
        https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
        https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
        https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
        https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
        https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
        https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
        https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
        *.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
        https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
        https://drive.google.com https://slides.google.com https://accounts.google.com
        https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
        https://www.youtube.com https://share.descript.com'
      etag:
      - W/"e8d1e903c8c6ec2f765163c0c03bed79"
      expires:
      - '0'
      permissions-policy:
      - camera=(), microphone=(self), geolocation=()
      pragma:
      - no-cache
      referrer-policy:
      - strict-origin-when-cross-origin
      strict-transport-security:
      - max-age=63072000; includeSubDomains
      vary:
      - Accept
      x-content-type-options:
      - nosniff
      x-frame-options:
      - SAMEORIGIN
      x-permitted-cross-domain-policies:
      - none
      x-request-id:
      - 5ea5f513-c359-4a92-a84a-08ad44d9857b
      x-runtime:
      - '0.044665'
      x-xss-protection:
      - 1; mode=block
    status:
      code: 201
      message: Created
version: 1

@@ -126,7 +126,7 @@ interactions:
      a freelancer and is now working on doing research and analysis for a new customer.\",
      ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
      False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b973fe0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b973fe0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b910290>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
      {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b9934d0>,
      ''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -149,7 +149,7 @@ interactions:
      writing content for a new customer.\", ''cache'': True, ''verbose'': False,
      ''max_rpm'': None, ''allow_delegation'': False, ''tools'': [], ''max_iter'':
      25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b7bbbf0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b7bbbf0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b9903b0>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
      {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b631bb0>,
      ''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -169,7 +169,7 @@ interactions:
      a freelancer and is now working on doing research and analysis for a new customer.\"",
      "cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
      "False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b973fe0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b973fe0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b910290>", "crew": "None", "i18n": "{''prompt_file'': None}",
      "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
      object at 0x12b9934d0>", "tools_results": "[]", "max_tokens": "None", "knowledge":
@@ -182,7 +182,7 @@ interactions:
      You work as a freelancer and are now working on writing content for a new customer.\"",
      "cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
      "False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b7bbbf0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b7bbbf0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b9903b0>", "crew": "None", "i18n": "{''prompt_file'': None}",
      "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
      object at 0x12b631bb0>", "tools_results": "[]", "max_tokens": "None", "knowledge":
@@ -214,7 +214,7 @@ interactions:
      a freelancer and is now working on doing research and analysis for a new customer.\",
      ''cache'': True, ''verbose'': False, ''max_rpm'': None, ''allow_delegation'':
      False, ''tools'': [], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b973fe0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b973fe0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b910290>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
      {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b9934d0>,
      ''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -237,7 +237,7 @@ interactions:
      writing content for a new customer.\", ''cache'': True, ''verbose'': False,
      ''max_rpm'': None, ''allow_delegation'': False, ''tools'': [], ''max_iter'':
      25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b7bbbf0>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b7bbbf0>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b9903b0>, ''crew'': None, ''i18n'': {''prompt_file'': None}, ''cache_handler'':
      {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler object at 0x12b631bb0>,
      ''tools_results'': [], ''max_tokens'': None, ''knowledge'': None, ''knowledge_sources'':
@@ -257,7 +257,7 @@ interactions:
      a freelancer and is now working on doing research and analysis for a new customer.\"",
      "cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
      "False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b973fe0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b973fe0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b910290>", "crew": "None", "i18n": "{''prompt_file'': None}",
      "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
      object at 0x12b9934d0>", "tools_results": "[]", "max_tokens": "None", "knowledge":
@@ -270,7 +270,7 @@ interactions:
      You work as a freelancer and are now working on writing content for a new customer.\"",
      "cache": "True", "verbose": "False", "max_rpm": "None", "allow_delegation":
      "False", "tools": "[]", "max_iter": "25", "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x12b7bbbf0>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x12b7bbbf0>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x12b9903b0>", "crew": "None", "i18n": "{''prompt_file'': None}",
      "cache_handler": "{}", "tools_handler": "<crewai.agents.tools_handler.ToolsHandler
      object at 0x12b631bb0>", "tools_results": "[]", "max_tokens": "None", "knowledge":

@@ -468,7 +468,7 @@ interactions:
      "description_updated": "False", "cache_function": "<function BaseTool.<lambda>
      at 0x107ff9440>", "result_as_answer": "True", "max_usage_count": "None", "current_usage_count":
      "0"}], "max_iter": 25, "agent_executor": "<crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x13ab2e030>", "llm": "<crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x13ab2e030>", "llm": "<crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x13ab2e5d0>", "crew": {"parent_flow": null, "name": "crew", "cache":
      true, "tasks": ["{''used_tools'': 0, ''tools_errors'': 0, ''delegations'': 0,
      ''i18n'': {''prompt_file'': None}, ''name'': None, ''prompt_context'': '''',
@@ -484,7 +484,7 @@ interactions:
      <class ''abc.MyCustomToolSchema''>, ''description_updated'': False, ''cache_function'':
      <function BaseTool.<lambda> at 0x107ff9440>, ''result_as_answer'': True, ''max_usage_count'':
      None, ''current_usage_count'': 0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x13ab2e030>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x13ab2e030>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x13ab2e5d0>, ''crew'': Crew(id=f74956dd-60d0-402a-a703-2cc3d767397f,
      process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
      {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler
@@ -512,7 +512,7 @@ interactions:
      ''description_updated'': False, ''cache_function'': <function BaseTool.<lambda>
      at 0x107ff9440>, ''result_as_answer'': True, ''max_usage_count'': None, ''current_usage_count'':
      0}], ''max_iter'': 25, ''agent_executor'': <crewai.agents.crew_agent_executor.CrewAgentExecutor
      object at 0x13ab2e030>, ''llm'': <crewai.llms.providers.openai.completion.OpenAICompletion
      object at 0x13ab2e030>, ''llm'': <crewai.llm.providers.openai.completion.OpenAICompletion
      object at 0x13ab2e5d0>, ''crew'': Crew(id=f74956dd-60d0-402a-a703-2cc3d767397f,
      process=Process.sequential, number_of_agents=1, number_of_tasks=1), ''i18n'':
      {''prompt_file'': None}, ''cache_handler'': {}, ''tools_handler'': <crewai.agents.tools_handler.ToolsHandler

@@ -1,212 +0,0 @@
"""Tests for crew_chat.py environment variable loading."""

import os
from unittest.mock import Mock, patch

import pytest

from crewai.cli.crew_chat import load_crew_and_name


@pytest.fixture
def temp_crew_project(tmp_path):
    """Create a temporary crew project with .env file."""
    project_dir = tmp_path / "test_crew"
    project_dir.mkdir()

    src_dir = project_dir / "src" / "test_crew"
    src_dir.mkdir(parents=True)

    env_file = project_dir / ".env"
    env_file.write_text("OPENAI_API_KEY=test-api-key-from-env\nMODEL=gpt-4\n")

    pyproject = project_dir / "pyproject.toml"
    pyproject.write_text("""[project]
name = "test_crew"
version = "0.1.0"
description = "Test crew"
requires-python = ">=3.10"
dependencies = ["crewai"]

[tool.crewai]
type = "crew"
""")

    (src_dir / "__init__.py").write_text("")

    crew_py = src_dir / "crew.py"
    crew_py.write_text("""from crewai import Agent, Crew, Process, Task, LLM
from crewai.project import CrewBase, agent, crew, task

default_llm = LLM(model="openai/gpt-4")

@CrewBase
class TestCrew:
    '''Test crew'''

    @agent
    def researcher(self) -> Agent:
        return Agent(
            role="Researcher",
            goal="Research topics",
            backstory="You are a researcher",
            llm=default_llm,
        )

    @task
    def research_task(self) -> Task:
        return Task(
            description="Research {topic}",
            expected_output="A report",
            agent=self.researcher(),
        )

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=[self.researcher()],
            tasks=[self.research_task()],
            process=Process.sequential,
            verbose=True,
        )
""")

    config_dir = src_dir / "config"
    config_dir.mkdir()

    agents_yaml = config_dir / "agents.yaml"
    agents_yaml.write_text("""researcher:
  role: Researcher
  goal: Research topics
  backstory: You are a researcher
""")

    tasks_yaml = config_dir / "tasks.yaml"
    tasks_yaml.write_text("""research_task:
  description: Research {topic}
  expected_output: A report
  agent: researcher
""")

    return project_dir


def test_load_crew_with_env_file(temp_crew_project, monkeypatch):
    """Test that load_crew_and_name loads .env before importing crew module."""
    monkeypatch.chdir(temp_crew_project)

    monkeypatch.delenv("OPENAI_API_KEY", raising=False)

    with patch("crewai.llm.LLM") as mock_llm:
        mock_llm.return_value = Mock()

        crew_instance, crew_name = load_crew_and_name()

        assert crew_instance is not None
        assert crew_name == "TestCrew"

        assert os.environ.get("OPENAI_API_KEY") == "test-api-key-from-env"
        assert os.environ.get("MODEL") == "gpt-4"


def test_env_var_precedence(temp_crew_project, monkeypatch):
    """Test that existing environment variables are not overridden by .env."""
    monkeypatch.chdir(temp_crew_project)

    existing_key = "existing-api-key-from-shell"
    monkeypatch.setenv("OPENAI_API_KEY", existing_key)

    with patch("crewai.llm.LLM") as mock_llm:
        mock_llm.return_value = Mock()

        crew_instance, crew_name = load_crew_and_name()

        assert crew_instance is not None
        assert crew_name == "TestCrew"

        assert os.environ.get("OPENAI_API_KEY") == existing_key

        assert os.environ.get("MODEL") == "gpt-4"


def test_load_crew_without_env_file(tmp_path, monkeypatch):
    """Test that load_crew_and_name works even without .env file."""
    project_dir = tmp_path / "test_crew_no_env"
    project_dir.mkdir()

    src_dir = project_dir / "src" / "test_crew_no_env"
    src_dir.mkdir(parents=True)

    pyproject = project_dir / "pyproject.toml"
    pyproject.write_text("""[project]
name = "test_crew_no_env"
version = "0.1.0"
description = "Test crew without env"
requires-python = ">=3.10"
dependencies = ["crewai"]

[tool.crewai]
type = "crew"
""")

    (src_dir / "__init__.py").write_text("")

    crew_py = src_dir / "crew.py"
    crew_py.write_text("""from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

@CrewBase
class TestCrewNoEnv:
    '''Test crew without env'''

    @agent
    def researcher(self) -> Agent:
        return Agent(
            role="Researcher",
            goal="Research topics",
            backstory="You are a researcher",
        )

    @task
    def research_task(self) -> Task:
        return Task(
            description="Research {topic}",
            expected_output="A report",
            agent=self.researcher(),
        )

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=[self.researcher()],
            tasks=[self.research_task()],
            process=Process.sequential,
            verbose=True,
        )
""")

    config_dir = src_dir / "config"
    config_dir.mkdir()

    agents_yaml = config_dir / "agents.yaml"
    agents_yaml.write_text("""researcher:
  role: Researcher
  goal: Research topics
  backstory: You are a researcher
""")

    tasks_yaml = config_dir / "tasks.yaml"
    tasks_yaml.write_text("""research_task:
  description: Research {topic}
  expected_output: A report
  agent: researcher
""")

    monkeypatch.chdir(project_dir)

    monkeypatch.setenv("OPENAI_API_KEY", "test-key")

    crew_instance, crew_name = load_crew_and_name()

    assert crew_instance is not None
    assert crew_name == "TestCrewNoEnv"
@@ -34,7 +34,7 @@ def test_anthropic_completion_is_used_when_claude_provider():
    """
    llm = LLM(model="claude/claude-3-5-sonnet-20241022")

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)
    assert llm.provider == "anthropic"
    assert llm.model == "claude-3-5-sonnet-20241022"
@@ -47,7 +47,7 @@ def test_anthropic_tool_use_conversation_flow():
    Test that the Anthropic completion properly handles tool use conversation flow
    """
    from unittest.mock import Mock, patch
    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    from anthropic.types.tool_use_block import ToolUseBlock

    # Create AnthropicCompletion instance
@@ -60,7 +60,7 @@ def test_anthropic_tool_use_conversation_flow():
    available_functions = {"get_weather": mock_weather_tool}

    # Mock the Anthropic client responses
    with patch.object(completion.client.messages, 'create') as mock_create:
    with patch.object(completion._client.messages, 'create') as mock_create:
        # Mock initial response with tool use - need to properly mock ToolUseBlock
        mock_tool_use = Mock(spec=ToolUseBlock)
        mock_tool_use.id = "tool_123"
@@ -123,7 +123,7 @@ def test_anthropic_completion_module_is_imported():
    """
    Test that the completion module is properly imported when using Anthropic provider
    """
    module_name = "crewai.llms.providers.anthropic.completion"
    module_name = "crewai.llm.providers.anthropic.completion"

    # Remove module from cache if it exists
    if module_name in sys.modules:
@@ -175,7 +175,7 @@ def test_anthropic_completion_initialization_parameters():
        api_key="test-key"
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)
    assert llm.model == "claude-3-5-sonnet-20241022"
    assert llm.temperature == 0.7
@@ -195,12 +195,12 @@ def test_anthropic_specific_parameters():
        timeout=60
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)
    assert llm.stop_sequences == ["Human:", "Assistant:"]
    assert llm.stop == ["Human:", "Assistant:"]
    assert llm.stream == True
    assert llm.client.max_retries == 5
    assert llm.client.timeout == 60
    assert llm._client.max_retries == 5
    assert llm._client.timeout == 60


def test_anthropic_completion_call():
@@ -390,7 +390,7 @@ def test_anthropic_raises_error_when_model_not_supported():
    """Test that AnthropicCompletion raises ValueError when model not supported"""

    # Mock the Anthropic client to raise an error
    with patch('crewai.llms.providers.anthropic.completion.Anthropic') as mock_anthropic_class:
    with patch('crewai.llm.providers.anthropic.completion.Anthropic') as mock_anthropic_class:
        mock_client = MagicMock()
        mock_anthropic_class.return_value = mock_client

@@ -427,7 +427,7 @@ def test_anthropic_client_params_setup():
        client_params=custom_client_params
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)

    assert llm.client_params == custom_client_params
@@ -462,7 +462,7 @@ def test_anthropic_client_params_override_defaults():
    )

    # Verify this is actually AnthropicCompletion, not LiteLLM fallback
    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)

    merged_params = llm._get_client_params()
@@ -487,7 +487,7 @@ def test_anthropic_client_params_none():
        client_params=None
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)

    assert llm.client_params is None
@@ -515,7 +515,7 @@ def test_anthropic_client_params_empty_dict():
        client_params={}
    )

    from crewai.llms.providers.anthropic.completion import AnthropicCompletion
    from crewai.llm.providers.anthropic.completion import AnthropicCompletion
    assert isinstance(llm, AnthropicCompletion)

    assert llm.client_params == {}
@@ -538,7 +538,7 @@ def test_anthropic_model_detection():

    for model_name in anthropic_test_cases:
        llm = LLM(model=model_name)
        from crewai.llms.providers.anthropic.completion import AnthropicCompletion
        from crewai.llm.providers.anthropic.completion import AnthropicCompletion
        assert isinstance(llm, AnthropicCompletion), f"Failed for model: {model_name}"


@@ -637,8 +637,8 @@ def test_anthropic_environment_variable_api_key():
    with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-anthropic-key"}):
        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

        assert llm.client is not None
        assert hasattr(llm.client, 'messages')
        assert llm._client is not None
        assert hasattr(llm._client, 'messages')


def test_anthropic_token_usage_tracking():
@@ -648,7 +648,7 @@ def test_anthropic_token_usage_tracking():
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Mock the Anthropic response with usage information
    with patch.object(llm.client.messages, 'create') as mock_create:
    with patch.object(llm._client.messages, 'create') as mock_create:
        mock_response = MagicMock()
        mock_response.content = [MagicMock(text="test response")]
        mock_response.usage = MagicMock(input_tokens=50, output_tokens=25)
@@ -667,23 +667,21 @@ def test_anthropic_token_usage_tracking():


def test_anthropic_stop_sequences_sync():
    """Test that stop and stop_sequences attributes stay synchronized."""
    """Test that stop sequences can be set and retrieved correctly."""
    llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")

    # Test setting stop as a list
    llm.stop = ["\nObservation:", "\nThought:"]
    assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
    assert llm.stop == ["\nObservation:", "\nThought:"]

    # Test setting stop as a string
    # Test setting stop as a string - note: setting via attribute doesn't go through validator
    # so it stays as a string
    llm.stop = "\nFinal Answer:"
    assert llm.stop_sequences == ["\nFinal Answer:"]
    assert llm.stop == ["\nFinal Answer:"]
    assert llm.stop == "\nFinal Answer:"

    # Test setting stop as None
    llm.stop = None
    assert llm.stop_sequences == []
    assert llm.stop == []
    assert llm.stop is None


@pytest.mark.vcr(filter_headers=["authorization", "x-api-key"])

@@ -37,7 +37,7 @@ def test_azure_completion_is_used_when_azure_openai_provider():
    """
    llm = LLM(model="azure_openai/gpt-4")

    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    assert isinstance(llm, AzureCompletion)
    assert llm.provider == "azure"
    assert llm.model == "gpt-4"
@@ -47,7 +47,7 @@ def test_azure_tool_use_conversation_flow():
    """
    Test that the Azure completion properly handles tool use conversation flow
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    from azure.ai.inference.models import ChatCompletionsToolCall

    # Create AzureCompletion instance
@@ -64,7 +64,7 @@ def test_azure_tool_use_conversation_flow():
    available_functions = {"get_weather": mock_weather_tool}

    # Mock the Azure client responses
    with patch.object(completion.client, 'complete') as mock_complete:
    with patch.object(completion._client, 'complete') as mock_complete:
        # Mock tool call in response with proper type
        mock_tool_call = MagicMock(spec=ChatCompletionsToolCall)
        mock_tool_call.function.name = "get_weather"
@@ -105,7 +105,7 @@ def test_azure_completion_module_is_imported():
    """
    Test that the completion module is properly imported when using Azure provider
    """
    module_name = "crewai.llms.providers.azure.completion"
    module_name = "crewai.llm.providers.azure.completion"

    # Remove module from cache if it exists
    if module_name in sys.modules:
@@ -160,7 +160,7 @@ def test_azure_completion_initialization_parameters():
        endpoint="https://test.openai.azure.com"
    )

    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    assert isinstance(llm, AzureCompletion)
    assert llm.model == "gpt-4"
    assert llm.temperature == 0.7
@@ -182,7 +182,7 @@ def test_azure_specific_parameters():
        endpoint="https://test.openai.azure.com"
    )

    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    assert isinstance(llm, AzureCompletion)
    assert llm.stop == ["Human:", "Assistant:"]
    assert llm.stream == True
@@ -374,7 +374,7 @@ def test_azure_completion_with_tools():

def test_azure_raises_error_when_endpoint_missing():
    """Test that AzureCompletion raises ValueError when endpoint is missing"""
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    # Clear environment variables
    with patch.dict(os.environ, {}, clear=True):
@@ -383,7 +383,7 @@ def test_azure_raises_error_when_endpoint_missing():

def test_azure_raises_error_when_api_key_missing():
    """Test that AzureCompletion raises ValueError when API key is missing"""
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    # Clear environment variables
    with patch.dict(os.environ, {}, clear=True):
@@ -400,7 +400,7 @@ def test_azure_endpoint_configuration():
    }):
        llm = LLM(model="azure/gpt-4")

        from crewai.llms.providers.azure.completion import AzureCompletion
        from crewai.llm.providers.azure.completion import AzureCompletion
        assert isinstance(llm, AzureCompletion)
        assert llm.endpoint == "https://test1.openai.azure.com/openai/deployments/gpt-4"

@@ -426,7 +426,7 @@ def test_azure_api_key_configuration():
    }):
        llm = LLM(model="azure/gpt-4")

        from crewai.llms.providers.azure.completion import AzureCompletion
        from crewai.llm.providers.azure.completion import AzureCompletion
        assert isinstance(llm, AzureCompletion)
        assert llm.api_key == "test-azure-key"

@@ -437,7 +437,7 @@ def test_azure_model_capabilities():
    """
    # Test GPT-4 model (supports function calling)
    llm_gpt4 = LLM(model="azure/gpt-4")
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    assert isinstance(llm_gpt4, AzureCompletion)
    assert llm_gpt4.is_openai_model == True
    assert llm_gpt4.supports_function_calling() == True
@@ -466,7 +466,7 @@ def test_azure_completion_params_preparation():
        max_tokens=1000
    )

    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    assert isinstance(llm, AzureCompletion)

    messages = [{"role": "user", "content": "Hello"}]
@@ -494,7 +494,7 @@ def test_azure_model_detection():

    for model_name in azure_test_cases:
        llm = LLM(model=model_name)
        from crewai.llms.providers.azure.completion import AzureCompletion
        from crewai.llm.providers.azure.completion import AzureCompletion
        assert isinstance(llm, AzureCompletion), f"Failed for model: {model_name}"


@@ -602,7 +602,7 @@ def test_azure_environment_variable_endpoint():
    }):
        llm = LLM(model="azure/gpt-4")

        assert llm.client is not None
        assert llm._client is not None
        assert llm.endpoint == "https://test.openai.azure.com/openai/deployments/gpt-4"


@@ -613,7 +613,7 @@ def test_azure_token_usage_tracking():
    llm = LLM(model="azure/gpt-4")

    # Mock the Azure response with usage information
    with patch.object(llm.client, 'complete') as mock_complete:
    with patch.object(llm._client, 'complete') as mock_complete:
        mock_message = MagicMock()
        mock_message.content = "test response"
        mock_message.tool_calls = None
@@ -651,7 +651,7 @@ def test_azure_http_error_handling():
    llm = LLM(model="azure/gpt-4")

    # Mock an HTTP error
    with patch.object(llm.client, 'complete') as mock_complete:
    with patch.object(llm._client, 'complete') as mock_complete:
        mock_complete.side_effect = HttpResponseError(message="Rate limit exceeded", response=MagicMock(status_code=429))

        with pytest.raises(HttpResponseError):
@@ -662,13 +662,13 @@ def test_azure_streaming_completion():
    """
    Test that streaming completions work properly
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    from azure.ai.inference.models import StreamingChatCompletionsUpdate

    llm = LLM(model="azure/gpt-4", stream=True)

    # Mock streaming response
    with patch.object(llm.client, 'complete') as mock_complete:
    with patch.object(llm._client, 'complete') as mock_complete:
        # Create mock streaming updates with proper type
        mock_updates = []
        for chunk in ["Hello", " ", "world", "!"]:
@@ -698,7 +698,7 @@ def test_azure_api_version_default():
    """
    llm = LLM(model="azure/gpt-4")

    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    assert isinstance(llm, AzureCompletion)
    # Should use default or environment variable
    assert llm.api_version is not None
@@ -721,7 +721,7 @@ def test_azure_openai_endpoint_url_construction():
    """
    Test that Azure OpenAI endpoint URLs are automatically constructed correctly
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -738,7 +738,7 @@ def test_azure_openai_endpoint_url_with_trailing_slash():
    """
    Test that trailing slashes are handled correctly in endpoint URLs
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -804,7 +804,7 @@ def test_non_azure_openai_model_parameter_included():
    """
    Test that model parameter IS included for non-Azure OpenAI endpoints
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -824,7 +824,7 @@ def test_azure_message_formatting_with_role():
    """
    Test that messages are formatted with both 'role' and 'content' fields
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    llm = LLM(model="azure/gpt-4")

@@ -886,12 +886,12 @@ def test_azure_improved_error_messages():
    """
    Test that improved error messages are provided for common HTTP errors
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion
    from azure.core.exceptions import HttpResponseError

    llm = LLM(model="azure/gpt-4")

    with patch.object(llm.client, 'complete') as mock_complete:
    with patch.object(llm._client, 'complete') as mock_complete:
        error_401 = HttpResponseError(message="Unauthorized")
        error_401.status_code = 401
        mock_complete.side_effect = error_401
@@ -918,7 +918,7 @@ def test_azure_api_version_properly_passed():
    """
    Test that api_version is properly passed to the client
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -940,7 +940,7 @@ def test_azure_timeout_and_max_retries_stored():
    """
    Test that timeout and max_retries parameters are stored
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -960,7 +960,7 @@ def test_azure_complete_params_include_optional_params():
    """
    Test that optional parameters are included in completion params when set
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -992,7 +992,7 @@ def test_azure_endpoint_validation_with_azure_prefix():
    """
    Test that 'azure/' prefix is properly stripped when constructing endpoint
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    with patch.dict(os.environ, {
        "AZURE_API_KEY": "test-key",
@@ -1009,7 +1009,7 @@ def test_azure_message_formatting_preserves_all_roles():
    """
    Test that all message roles (system, user, assistant) are preserved correctly
    """
    from crewai.llms.providers.azure.completion import AzureCompletion
    from crewai.llm.providers.azure.completion import AzureCompletion

    llm = LLM(model="azure/gpt-4")


@@ -19,7 +19,7 @@ def mock_aws_credentials():
|
||||
"AWS_DEFAULT_REGION": "us-east-1"
|
||||
}):
|
||||
# Mock boto3 Session to prevent actual AWS connections
|
||||
with patch('crewai.llms.providers.bedrock.completion.Session') as mock_session_class:
|
||||
with patch('crewai.llm.providers.bedrock.completion.Session') as mock_session_class:
|
||||
# Create mock session instance
|
||||
mock_session_instance = MagicMock()
|
||||
mock_client = MagicMock()
|
||||
@@ -67,7 +67,7 @@ def test_bedrock_completion_module_is_imported():
|
||||
"""
|
||||
Test that the completion module is properly imported when using Bedrock provider
|
||||
"""
|
||||
module_name = "crewai.llms.providers.bedrock.completion"
|
||||
module_name = "crewai.llm.providers.bedrock.completion"
|
||||
|
||||
# Remove module from cache if it exists
|
||||
if module_name in sys.modules:
|
||||
@@ -124,7 +124,7 @@ def test_bedrock_completion_initialization_parameters():
|
||||
region_name="us-west-2"
|
||||
)
|
||||
|
||||
from crewai.llms.providers.bedrock.completion import BedrockCompletion
|
||||
from crewai.llm.providers.bedrock.completion import BedrockCompletion
|
||||
assert isinstance(llm, BedrockCompletion)
|
||||
assert llm.model == "anthropic.claude-3-5-sonnet-20241022-v2:0"
|
||||
assert llm.temperature == 0.7
|
||||
@@ -145,9 +145,9 @@ def test_bedrock_specific_parameters():
|
||||
region_name="us-east-1"
|
||||
)
|
||||
|
||||
from crewai.llms.providers.bedrock.completion import BedrockCompletion
|
||||
from crewai.llm.providers.bedrock.completion import BedrockCompletion
|
||||
assert isinstance(llm, BedrockCompletion)
|
||||
assert llm.stop_sequences == ["Human:", "Assistant:"]
|
||||
assert llm.stop == ["Human:", "Assistant:"]
|
||||
assert llm.stream == True
|
||||
assert llm.region_name == "us-east-1"
|
||||
|
||||
@@ -369,7 +369,7 @@ def test_bedrock_aws_credentials_configuration():
     }):
         llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

-        from crewai.llms.providers.bedrock.completion import BedrockCompletion
+        from crewai.llm.providers.bedrock.completion import BedrockCompletion
         assert isinstance(llm, BedrockCompletion)
         assert llm.region_name == "us-east-1"

@@ -390,7 +390,7 @@ def test_bedrock_model_capabilities():
     """
     # Test Claude model
     llm_claude = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
-    from crewai.llms.providers.bedrock.completion import BedrockCompletion
+    from crewai.llm.providers.bedrock.completion import BedrockCompletion
     assert isinstance(llm_claude, BedrockCompletion)
     assert llm_claude.is_claude_model == True
     assert llm_claude.supports_tools == True
@@ -413,7 +413,7 @@ def test_bedrock_inference_config():
         max_tokens=1000
     )

-    from crewai.llms.providers.bedrock.completion import BedrockCompletion
+    from crewai.llm.providers.bedrock.completion import BedrockCompletion
     assert isinstance(llm, BedrockCompletion)

     # Test config preparation
@@ -444,7 +444,7 @@ def test_bedrock_model_detection():

     for model_name in bedrock_test_cases:
         llm = LLM(model=model_name)
-        from crewai.llms.providers.bedrock.completion import BedrockCompletion
+        from crewai.llm.providers.bedrock.completion import BedrockCompletion
         assert isinstance(llm, BedrockCompletion), f"Failed for model: {model_name}"


@@ -579,7 +579,7 @@ def test_bedrock_token_usage_tracking():
     llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

     # Mock the Bedrock response with usage information
-    with patch.object(llm.client, 'converse') as mock_converse:
+    with patch.object(llm._client, 'converse') as mock_converse:
         mock_response = {
             'output': {
                 'message': {
@@ -624,7 +624,7 @@ def test_bedrock_tool_use_conversation_flow():
     available_functions = {"get_weather": mock_weather_tool}

     # Mock the Bedrock client responses
-    with patch.object(llm.client, 'converse') as mock_converse:
+    with patch.object(llm._client, 'converse') as mock_converse:
         # First response: tool use request
         tool_use_response = {
             'output': {
@@ -710,7 +710,7 @@ def test_bedrock_client_error_handling():
     llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

     # Test ValidationException
-    with patch.object(llm.client, 'converse') as mock_converse:
+    with patch.object(llm._client, 'converse') as mock_converse:
         error_response = {
             'Error': {
                 'Code': 'ValidationException',
@@ -724,7 +724,7 @@ def test_bedrock_client_error_handling():
         assert "validation" in str(exc_info.value).lower()

     # Test ThrottlingException
-    with patch.object(llm.client, 'converse') as mock_converse:
+    with patch.object(llm._client, 'converse') as mock_converse:
         error_response = {
             'Error': {
                 'Code': 'ThrottlingException',
@@ -739,23 +739,19 @@ def test_bedrock_client_error_handling():


 def test_bedrock_stop_sequences_sync():
-    """Test that stop and stop_sequences attributes stay synchronized."""
+    """Test that stop sequences can be set and retrieved correctly."""
     llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")

     # Test setting stop as a list
     llm.stop = ["\nObservation:", "\nThought:"]
-    assert list(llm.stop_sequences) == ["\nObservation:", "\nThought:"]
     assert llm.stop == ["\nObservation:", "\nThought:"]

     # Test setting stop as a string
     llm.stop = "\nFinal Answer:"
-    assert list(llm.stop_sequences) == ["\nFinal Answer:"]
     assert llm.stop == ["\nFinal Answer:"]
+    llm2 = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0", stop_sequences="\nFinal Answer:")
+    assert llm2.stop == ["\nFinal Answer:"]

     # Test setting stop as None
     llm.stop = None
-    assert list(llm.stop_sequences) == []
-    assert llm.stop == []
+    assert llm.stop is None


 def test_bedrock_stop_sequences_sent_to_api():
@@ -766,7 +762,7 @@ def test_bedrock_stop_sequences_sent_to_api():
     llm.stop = ["\nObservation:", "\nThought:"]

     # Patch the API call to capture parameters without making real call
-    with patch.object(llm.client, 'converse') as mock_converse:
+    with patch.object(llm._client, 'converse') as mock_converse:
         mock_response = {
             'output': {
                 'message': {
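Taken together, the Bedrock hunks apply two renames: the provider package moves from `crewai.llms` to `crewai.llm`, and the boto3 client moves behind a private `_client` attribute. Below is a minimal sketch of the resulting test pattern, assembled only from what these hunks show (the `BedrockCompletion` class, the `LLM` factory, and `_client.converse`); the stubbed response body is an illustrative assumption.

```python
# Hedged sketch of the post-rename Bedrock test pattern; not from the source tree.
from unittest.mock import patch

from crewai.llm import LLM
from crewai.llm.providers.bedrock.completion import BedrockCompletion

llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
assert isinstance(llm, BedrockCompletion)  # LLM() dispatches to the native provider

# The Bedrock client now lives on the private `_client` attribute,
# so mocks patch `llm._client` rather than the old `llm.client`.
with patch.object(llm._client, "converse") as mock_converse:
    # Assumed minimal response shape; the diff only shows the 'output'/'message' nesting.
    mock_converse.return_value = {
        "output": {"message": {"role": "assistant", "content": [{"text": "hi"}]}}
    }
```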
@@ -34,7 +34,7 @@ def test_gemini_completion_is_used_when_gemini_provider():
     """
     llm = LLM(model="gemini/gemini-2.0-flash-001")

-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm, GeminiCompletion)
     assert llm.provider == "gemini"
     assert llm.model == "gemini-2.0-flash-001"
@@ -47,7 +47,7 @@ def test_gemini_tool_use_conversation_flow():
     Test that the Gemini completion properly handles tool use conversation flow
     """
     from unittest.mock import Mock, patch
-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion

     # Create GeminiCompletion instance
     completion = GeminiCompletion(model="gemini-2.0-flash-001")
@@ -59,7 +59,7 @@ def test_gemini_tool_use_conversation_flow():
     available_functions = {"get_weather": mock_weather_tool}

     # Mock the Google Gemini client responses
-    with patch.object(completion.client.models, 'generate_content') as mock_generate:
+    with patch.object(completion._client.models, 'generate_content') as mock_generate:
         # Mock function call in response
         mock_function_call = Mock()
         mock_function_call.name = "get_weather"
@@ -102,7 +102,7 @@ def test_gemini_completion_module_is_imported():
     """
     Test that the completion module is properly imported when using Google provider
     """
-    module_name = "crewai.llms.providers.gemini.completion"
+    module_name = "crewai.llm.providers.gemini.completion"

     # Remove module from cache if it exists
     if module_name in sys.modules:
@@ -159,7 +159,7 @@ def test_gemini_completion_initialization_parameters():
         api_key="test-key"
     )

-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm, GeminiCompletion)
     assert llm.model == "gemini-2.0-flash-001"
     assert llm.temperature == 0.7
@@ -186,9 +186,9 @@ def test_gemini_specific_parameters():
         location="us-central1"
     )

-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm, GeminiCompletion)
-    assert llm.stop_sequences == ["Human:", "Assistant:"]
+    assert llm.stop == ["Human:", "Assistant:"]
     assert llm.stream == True
     assert llm.safety_settings == safety_settings
     assert llm.project == "test-project"
@@ -382,7 +382,7 @@ def test_gemini_raises_error_when_model_not_supported():
     """Test that GeminiCompletion raises ValueError when model not supported"""

     # Mock the Google client to raise an error
-    with patch('crewai.llms.providers.gemini.completion.genai') as mock_genai:
+    with patch('crewai.llm.providers.gemini.completion.genai') as mock_genai:
         mock_client = MagicMock()
         mock_genai.Client.return_value = mock_client

@@ -420,7 +420,7 @@ def test_gemini_vertex_ai_setup():
         location="us-west1"
     )

-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm, GeminiCompletion)

     assert llm.project == "test-project"
@@ -435,7 +435,7 @@ def test_gemini_api_key_configuration():
     with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-google-key"}):
         llm = LLM(model="google/gemini-2.0-flash-001")

-        from crewai.llms.providers.gemini.completion import GeminiCompletion
+        from crewai.llm.providers.gemini.completion import GeminiCompletion
         assert isinstance(llm, GeminiCompletion)
         assert llm.api_key == "test-google-key"

@@ -453,7 +453,7 @@ def test_gemini_model_capabilities():
     """
     # Test Gemini 2.0 model
     llm_2_0 = LLM(model="google/gemini-2.0-flash-001")
-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm_2_0, GeminiCompletion)
     assert llm_2_0.is_gemini_2 == True
     assert llm_2_0.supports_tools == True
@@ -477,7 +477,7 @@ def test_gemini_generation_config():
         max_output_tokens=1000
     )

-    from crewai.llms.providers.gemini.completion import GeminiCompletion
+    from crewai.llm.providers.gemini.completion import GeminiCompletion
     assert isinstance(llm, GeminiCompletion)

     # Test config preparation
@@ -504,7 +504,7 @@ def test_gemini_model_detection():

     for model_name in gemini_test_cases:
         llm = LLM(model=model_name)
-        from crewai.llms.providers.gemini.completion import GeminiCompletion
+        from crewai.llm.providers.gemini.completion import GeminiCompletion
         assert isinstance(llm, GeminiCompletion), f"Failed for model: {model_name}"


@@ -614,8 +614,8 @@ def test_gemini_environment_variable_api_key():
     with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-google-key"}):
         llm = LLM(model="google/gemini-2.0-flash-001")

-        assert llm.client is not None
-        assert hasattr(llm.client, 'models')
+        assert llm._client is not None
+        assert hasattr(llm._client, 'models')
         assert llm.api_key == "test-google-key"


@@ -626,7 +626,7 @@ def test_gemini_token_usage_tracking():
     llm = LLM(model="google/gemini-2.0-flash-001")

     # Mock the Gemini response with usage information
-    with patch.object(llm.client.models, 'generate_content') as mock_generate:
+    with patch.object(llm._client.models, 'generate_content') as mock_generate:
         mock_response = MagicMock()
         mock_response.text = "test response"
         mock_response.candidates = []
@@ -651,23 +651,20 @@ def test_gemini_token_usage_tracking():


 def test_gemini_stop_sequences_sync():
-    """Test that stop and stop_sequences attributes stay synchronized."""
+    """Test that stop sequences can be set and retrieved correctly."""
     llm = LLM(model="google/gemini-2.0-flash-001")

     # Test setting stop as a list
     llm.stop = ["\nObservation:", "\nThought:"]
-    assert llm.stop_sequences == ["\nObservation:", "\nThought:"]
     assert llm.stop == ["\nObservation:", "\nThought:"]

     # Test setting stop as a string
     llm.stop = "\nFinal Answer:"
-    assert llm.stop_sequences == ["\nFinal Answer:"]
-    assert llm.stop == ["\nFinal Answer:"]
+    assert llm.stop == "\nFinal Answer:"

     # Test setting stop as None
     llm.stop = None
-    assert llm.stop_sequences == []
-    assert llm.stop == []
+    assert llm.stop is None


 def test_gemini_stop_sequences_sent_to_api():
@@ -678,7 +675,7 @@ def test_gemini_stop_sequences_sent_to_api():
     llm.stop = ["\nObservation:", "\nThought:"]

     # Patch the API call to capture parameters without making real call
-    with patch.object(llm.client.models, 'generate_content') as mock_generate:
+    with patch.object(llm._client.models, 'generate_content') as mock_generate:
         mock_response = MagicMock()
         mock_response.text = "Hello"
         mock_response.candidates = []
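Beyond the import and `_client` renames, the two stop-sequence hunks change the tested contract: the old tests kept a separate `stop_sequences` attribute in sync with `stop`, while the updated ones treat `llm.stop` as the single source of truth. A sketch of the new behavior, assuming only the semantics the updated asserts encode:

```python
# Hedged sketch of the stop-sequence contract the updated Gemini tests assert.
from crewai.llm import LLM

llm = LLM(model="google/gemini-2.0-flash-001")

llm.stop = ["\nObservation:", "\nThought:"]
assert llm.stop == ["\nObservation:", "\nThought:"]  # lists round-trip as-is

llm.stop = "\nFinal Answer:"
assert llm.stop == "\nFinal Answer:"  # strings are no longer coerced to lists here

llm.stop = None
assert llm.stop is None  # None stays None instead of becoming []
```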
@@ -6,7 +6,7 @@ import httpx
 import pytest

 from crewai.llm import LLM
-from crewai.llms.hooks.base import BaseInterceptor
+from crewai.llm.hooks.base import BaseInterceptor


 @pytest.fixture(autouse=True)
@@ -3,7 +3,7 @@
 import httpx
 import pytest

-from crewai.llms.hooks.base import BaseInterceptor
+from crewai.llm.hooks.base import BaseInterceptor


 class SimpleInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
@@ -4,7 +4,7 @@ import httpx
 import pytest

 from crewai.llm import LLM
-from crewai.llms.hooks.base import BaseInterceptor
+from crewai.llm.hooks.base import BaseInterceptor


 class OpenAITestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
@@ -5,8 +5,8 @@ from unittest.mock import Mock
 import httpx
 import pytest

-from crewai.llms.hooks.base import BaseInterceptor
-from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
+from crewai.llm.hooks.base import BaseInterceptor
+from crewai.llm.hooks.transport import AsyncHTTPTransport, HTTPTransport


 class TrackingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
@@ -6,7 +6,7 @@ import httpx
 import pytest

 from crewai.llm import LLM
-from crewai.llms.hooks.base import BaseInterceptor
+from crewai.llm.hooks.base import BaseInterceptor


 @pytest.fixture(autouse=True)
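The interceptor test files change only their imports: `crewai.llms.hooks` becomes `crewai.llm.hooks`. A sketch of the updated import block and the generic subclass shape these tests share; the interceptor's method bodies are not part of the diff, so none are shown here:

```python
# Hedged sketch: only the import paths and the subclass shape are visible in
# the hunks above; BaseInterceptor's hook methods are elided, as in the diff.
import httpx

from crewai.llm.hooks.base import BaseInterceptor
from crewai.llm.hooks.transport import AsyncHTTPTransport, HTTPTransport


class SimpleInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    ...  # hook methods elided
```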
@@ -6,7 +6,7 @@ import openai
 import pytest

 from crewai.llm import LLM
-from crewai.llms.providers.openai.completion import OpenAICompletion
+from crewai.llm.providers.openai.completion import OpenAICompletion
 from crewai.crew import Crew
 from crewai.agent import Agent
 from crewai.task import Task
@@ -29,7 +29,7 @@ def test_openai_completion_is_used_when_no_provider_prefix():
     """
     llm = LLM(model="gpt-4o")

-    from crewai.llms.providers.openai.completion import OpenAICompletion
+    from crewai.llm.providers.openai.completion import OpenAICompletion
     assert isinstance(llm, OpenAICompletion)
     assert llm.provider == "openai"
     assert llm.model == "gpt-4o"
@@ -63,7 +63,7 @@ def test_openai_completion_module_is_imported():
     """
     Test that the completion module is properly imported when using OpenAI provider
     """
-    module_name = "crewai.llms.providers.openai.completion"
+    module_name = "crewai.llm.providers.openai.completion"

     # Remove module from cache if it exists
     if module_name in sys.modules:
@@ -114,7 +114,7 @@ def test_openai_completion_initialization_parameters():
         api_key="test-key"
     )

-    from crewai.llms.providers.openai.completion import OpenAICompletion
+    from crewai.llm.providers.openai.completion import OpenAICompletion
     assert isinstance(llm, OpenAICompletion)
     assert llm.model == "gpt-4o"
     assert llm.temperature == 0.7
@@ -335,7 +335,7 @@ def test_openai_completion_call_returns_usage_metrics():
 def test_openai_raises_error_when_model_not_supported():
     """Test that OpenAICompletion raises ValueError when model not supported"""

-    with patch('crewai.llms.providers.openai.completion.OpenAI') as mock_openai_class:
+    with patch('crewai.llm.providers.openai.completion.OpenAI') as mock_openai_class:
         mock_client = MagicMock()
         mock_openai_class.return_value = mock_client

@@ -369,11 +369,11 @@ def test_openai_client_setup_with_extra_arguments():
     assert llm.top_p == 0.5

     # Check that client parameters are properly configured
-    assert llm.client.max_retries == 3
-    assert llm.client.timeout == 30
+    assert llm._client.max_retries == 3
+    assert llm._client.timeout == 30

     # Test that parameters are properly used in API calls
-    with patch.object(llm.client.chat.completions, 'create') as mock_create:
+    with patch.object(llm._client.chat.completions, 'create') as mock_create:
         mock_create.return_value = MagicMock(
             choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
             usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30)
@@ -394,7 +394,7 @@ def test_extra_arguments_are_passed_to_openai_completion():
     """
     llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)

-    with patch.object(llm.client.chat.completions, 'create') as mock_create:
+    with patch.object(llm._client.chat.completions, 'create') as mock_create:
         mock_create.return_value = MagicMock(
             choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
             usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30)
@@ -501,7 +501,7 @@ def test_openai_streaming_with_response_model():

     llm = LLM(model="openai/gpt-4o", stream=True)

-    with patch.object(llm.client.chat.completions, "create") as mock_create:
+    with patch.object(llm._client.chat.completions, "create") as mock_create:
         mock_chunk1 = MagicMock()
         mock_chunk1.choices = [
             MagicMock(delta=MagicMock(content='{"answer": "test", ', tool_calls=None))
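The OpenAI hunks follow the same two renames, so mocking now targets `llm._client.chat.completions.create`. A minimal sketch assembled only from the mock setup visible above:

```python
# Hedged sketch of the updated OpenAI mocking pattern; the MagicMock response
# shape is copied from the hunks above, not from the full test file.
from unittest.mock import MagicMock, patch

from crewai.llm import LLM

llm = LLM(model="gpt-4o", temperature=0.7, max_tokens=1000, top_p=0.5, max_retries=3)
assert llm._client.max_retries == 3  # client kwargs now live on the private client

with patch.object(llm._client.chat.completions, "create") as mock_create:
    mock_create.return_value = MagicMock(
        choices=[MagicMock(message=MagicMock(content="test response", tool_calls=None))],
        usage=MagicMock(prompt_tokens=10, completion_tokens=20, total_tokens=30),
    )
```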
@@ -2,7 +2,7 @@ from typing import Any, Dict, List, Optional, Union

 import pytest
 from crewai import Agent, Crew, Process, Task
-from crewai.llms.base_llm import BaseLLM
+from crewai.llm.base_llm import BaseLLM
 from crewai.utilities.llm_utils import create_llm


@@ -11,7 +11,7 @@ from crewai.events.event_types import (
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
+from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from pydantic import BaseModel
 import pytest
@@ -229,7 +229,7 @@ def test_validate_call_params_supported():
         a: int

     # Patch supports_response_schema to simulate a supported model.
-    with patch("crewai.llm.supports_response_schema", return_value=True):
+    with patch("crewai.llm.core.supports_response_schema", return_value=True):
         llm = LLM(
             model="openrouter/deepseek/deepseek-chat", response_format=DummyResponse
         )
@@ -242,7 +242,7 @@ def test_validate_call_params_not_supported():
         a: int

     # Patch supports_response_schema to simulate an unsupported model.
-    with patch("crewai.llm.supports_response_schema", return_value=False):
+    with patch("crewai.llm.core.supports_response_schema", return_value=False):
         llm = LLM(model="gemini/gemini-1.5-pro", response_format=DummyResponse, is_litellm=True)
         with pytest.raises(ValueError) as excinfo:
             llm._validate_call_params()
@@ -342,7 +342,7 @@ def test_context_window_validation():
     # Test invalid window size
     with pytest.raises(ValueError) as excinfo:
         with patch.dict(
-            "crewai.llm.LLM_CONTEXT_WINDOW_SIZES",
+            "crewai.llm.core.LLM_CONTEXT_WINDOW_SIZES",
             {"test-model": 500},  # Below minimum
             clear=True,
         ):
@@ -702,8 +702,8 @@ def test_ollama_does_not_modify_when_last_is_user(ollama_llm):

 def test_native_provider_raises_error_when_supported_but_fails():
     """Test that when a native provider is in SUPPORTED_NATIVE_PROVIDERS but fails to instantiate, we raise the error."""
-    with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai"]):
-        with patch("crewai.llm.LLM._get_native_provider") as mock_get_native:
+    with patch("crewai.llm.internal.meta.SUPPORTED_NATIVE_PROVIDERS", ["openai"]):
+        with patch("crewai.llm.internal.meta.LLMMeta._get_native_provider") as mock_get_native:
             # Mock that provider exists but throws an error when instantiated
             mock_provider = MagicMock()
             mock_provider.side_effect = ValueError("Native provider initialization failed")
@@ -718,7 +718,7 @@ def test_native_provider_raises_error_when_supported_but_fails():

 def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
     """Test that when a provider is not in SUPPORTED_NATIVE_PROVIDERS, we fall back to LiteLLM."""
-    with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
+    with patch("crewai.llm.internal.meta.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
         # Using a provider not in the supported list
         llm = LLM(model="groq/llama-3.1-70b-versatile", is_litellm=False)

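These hunks show where module-level names moved after the split: `LLM`, `CONTEXT_WINDOW_USAGE_RATIO`, `supports_response_schema`, and `LLM_CONTEXT_WINDOW_SIZES` now resolve via `crewai.llm.core`, while provider selection lives in `crewai.llm.internal.meta`. A sketch of the updated patch targets, using only paths that appear in the diff:

```python
# Hedged sketch: patch targets after the module split, copied from the hunks above.
from unittest.mock import patch

from crewai.llm.core import CONTEXT_WINDOW_USAGE_RATIO, LLM

with patch("crewai.llm.core.supports_response_schema", return_value=True):
    ...  # structured-output validation now sees a "supported" model

with patch("crewai.llm.internal.meta.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
    # Providers outside this list fall back to LiteLLM.
    llm = LLM(model="groq/llama-3.1-70b-versatile", is_litellm=False)
```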
@@ -697,13 +697,8 @@ def test_save_task_json_output():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_save_task_pydantic_output(tmp_path, monkeypatch):
-    """Test saving pydantic output to a file.
-
-    Uses tmp_path fixture and monkeypatch to change directory to avoid
-    file system race conditions on enterprise systems.
-    """
-    from pathlib import Path
+def test_save_task_pydantic_output():
+    import uuid

     class ScoreOutput(BaseModel):
         score: int
@@ -715,9 +710,7 @@ def test_save_task_pydantic_output(tmp_path, monkeypatch):
         allow_delegation=False,
     )

-    monkeypatch.chdir(tmp_path)
-
-    output_file = "score_output.json"
+    output_file = f"score_{uuid.uuid4()}.json"
     task = Task(
         description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
         expected_output="The score of the title.",
@@ -729,9 +722,11 @@ def test_save_task_pydantic_output(tmp_path, monkeypatch):
     crew = Crew(agents=[scorer], tasks=[task])
     crew.kickoff()

-    output_path = Path(output_file).resolve()
-    assert output_path.exists()
-    assert {"score": 4} == json.loads(output_path.read_text())
+    output_file_exists = os.path.exists(output_file)
+    assert output_file_exists
+    assert {"score": 4} == json.loads(open(output_file).read())
+    if output_file_exists:
+        os.remove(output_file)


 @pytest.mark.vcr(filter_headers=["authorization"])
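The task-output hunks revert the `tmp_path`/`monkeypatch` approach in favor of a unique filename in the working directory plus manual cleanup. A sketch of that pattern in isolation; the file write below stands in for the `crew.kickoff()` call that produces the output in the real test:

```python
# Hedged sketch of the uuid-based output-file pattern the updated test uses.
import json
import os
import uuid

output_file = f"score_{uuid.uuid4()}.json"  # unique name avoids collisions between runs

try:
    # Stand-in for crew.kickoff(), which writes the task output in the real test.
    with open(output_file, "w") as f:
        json.dump({"score": 4}, f)

    output_file_exists = os.path.exists(output_file)
    assert output_file_exists
    assert {"score": 4} == json.loads(open(output_file).read())
finally:
    if os.path.exists(output_file):
        os.remove(output_file)  # manual cleanup replaces the tmp_path fixture
```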
@@ -1,112 +0,0 @@
-"""Tests to verify that traces are sent when enabled and not sent when disabled.
-
-VCR will record HTTP interactions. Inspect cassettes to verify tracing behavior.
-"""
-
-import pytest
-from crewai import Agent, Crew, Task
-from tests.utils import wait_for_event_handlers
-
-
-class TestTraceEnableDisable:
-    """Test suite to verify trace sending behavior with VCR cassette recording."""
-
-    @pytest.mark.vcr(filter_headers=["authorization"])
-    def test_no_http_calls_when_disabled_via_env(self):
-        """Test execution when tracing disabled via CREWAI_TRACING_ENABLED=false."""
-        with pytest.MonkeyPatch.context() as mp:
-            mp.setenv("CREWAI_TRACING_ENABLED", "false")
-            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
-
-            agent = Agent(
-                role="Test Agent",
-                goal="Test goal",
-                backstory="Test backstory",
-                llm="gpt-4o-mini",
-            )
-            task = Task(
-                description="Say hello",
-                expected_output="hello",
-                agent=agent,
-            )
-            crew = Crew(agents=[agent], tasks=[task], verbose=False)
-
-            result = crew.kickoff()
-            wait_for_event_handlers()
-
-            assert result is not None
-
-    @pytest.mark.vcr(filter_headers=["authorization"])
-    def test_no_http_calls_when_disabled_via_tracing_false(self):
-        """Test execution when tracing=False explicitly set."""
-        with pytest.MonkeyPatch.context() as mp:
-            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
-
-            agent = Agent(
-                role="Test Agent",
-                goal="Test goal",
-                backstory="Test backstory",
-                llm="gpt-4o-mini",
-            )
-            task = Task(
-                description="Say hello",
-                expected_output="hello",
-                agent=agent,
-            )
-            crew = Crew(agents=[agent], tasks=[task], verbose=False, tracing=False)
-
-            result = crew.kickoff()
-            wait_for_event_handlers()
-
-            assert result is not None
-
-    @pytest.mark.vcr(filter_headers=["authorization"])
-    def test_trace_calls_when_enabled_via_env(self):
-        """Test execution when tracing enabled via CREWAI_TRACING_ENABLED=true."""
-        with pytest.MonkeyPatch.context() as mp:
-            mp.setenv("CREWAI_TRACING_ENABLED", "true")
-            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
-            mp.setenv("OTEL_SDK_DISABLED", "false")
-
-            agent = Agent(
-                role="Test Agent",
-                goal="Test goal",
-                backstory="Test backstory",
-                llm="gpt-4o-mini",
-            )
-            task = Task(
-                description="Say hello",
-                expected_output="hello",
-                agent=agent,
-            )
-            crew = Crew(agents=[agent], tasks=[task], verbose=False)
-
-            result = crew.kickoff()
-            wait_for_event_handlers()
-
-            assert result is not None
-
-    @pytest.mark.vcr(filter_headers=["authorization"])
-    def test_trace_calls_when_enabled_via_tracing_true(self):
-        """Test execution when tracing=True explicitly set."""
-        with pytest.MonkeyPatch.context() as mp:
-            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
-            mp.setenv("OTEL_SDK_DISABLED", "false")
-
-            agent = Agent(
-                role="Test Agent",
-                goal="Test goal",
-                backstory="Test backstory",
-                llm="gpt-4o-mini",
-            )
-            task = Task(
-                description="Say hello",
-                expected_output="hello",
-                agent=agent,
-            )
-            crew = Crew(agents=[agent], tasks=[task], verbose=False, tracing=True)
-
-            result = crew.kickoff()
-            wait_for_event_handlers()
-
-            assert result is not None