mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-09 16:18:30 +00:00

Merge branch 'main' of github.com:crewAIInc/crewAI into lorenze/pii-redaction-feat
@@ -120,6 +120,8 @@ HEADERS_TO_FILTER = {
     "accept-encoding": "ACCEPT-ENCODING-XXX",
     "x-amzn-requestid": "X-AMZN-REQUESTID-XXX",
     "x-amzn-RequestId": "X-AMZN-REQUESTID-XXX",
+    "x-a2a-notification-token": "X-A2A-NOTIFICATION-TOKEN-XXX",
+    "x-a2a-version": "X-A2A-VERSION-XXX",
 }
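This mapping looks like it normalizes sensitive headers to fixed placeholders before recorded HTTP fixtures are persisted. A hedged sketch of how such a table might be applied; the `redact_headers` helper is illustrative and not part of this commit:

```python
# Illustrative only: replace known-sensitive header values with their
# placeholders. The exact-match lookup explains why both x-amzn-requestid
# casing variants appear as separate keys above.
def redact_headers(headers: dict[str, str]) -> dict[str, str]:
    return {
        name: HEADERS_TO_FILTER.get(name, value)
        for name, value in headers.items()
    }

redact_headers({"x-a2a-version": "0.3", "content-type": "application/json"})
# -> {"x-a2a-version": "X-A2A-VERSION-XXX", "content-type": "application/json"}
```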
@@ -87,6 +87,10 @@ The `A2AConfig` class accepts the following parameters:

  When `True`, returns the A2A agent's result directly when it signals completion. When `False`, allows the server agent to review the result and potentially continue the conversation.
</ParamField>

<ParamField path="updates" type="UpdateConfig" default="StreamingConfig()">
  Update mechanism for receiving task status updates. Options: `StreamingConfig`, `PollingConfig`, or `PushNotificationConfig`.
</ParamField>

## Authentication

For A2A agents that require authentication, use one of the provided auth schemes:
@@ -253,6 +257,74 @@ When `fail_fast=False`:

- If all agents fail, the LLM receives a notice about unavailable agents and handles the task directly
- Connection errors are captured and included in the context for better decision-making
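For example, a delegating agent that should degrade gracefully rather than error out can be configured like this (a minimal sketch; the endpoint is illustrative):

```python Code
from crewai.a2a import A2AConfig

# With fail_fast=False, an unreachable remote agent is skipped and the
# delegating LLM handles the task itself instead of raising immediately.
config = A2AConfig(
    endpoint="https://research.example.com/.well-known/agent-card.json",
    fail_fast=False,
)
```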
## Update Mechanisms

Control how your agent receives task status updates from remote A2A agents:

<Tabs>
  <Tab title="Streaming (Default)">
    ```python Code
    from crewai import Agent
    from crewai.a2a import A2AConfig
    from crewai.a2a.updates import StreamingConfig

    agent = Agent(
        role="Research Coordinator",
        goal="Coordinate research tasks",
        backstory="Expert at delegation",
        llm="gpt-4o",
        a2a=A2AConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            updates=StreamingConfig()
        )
    )
    ```
  </Tab>

  <Tab title="Polling">
    ```python Code
    from crewai import Agent
    from crewai.a2a import A2AConfig
    from crewai.a2a.updates import PollingConfig

    agent = Agent(
        role="Research Coordinator",
        goal="Coordinate research tasks",
        backstory="Expert at delegation",
        llm="gpt-4o",
        a2a=A2AConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            updates=PollingConfig(
                interval=2.0,
                timeout=300.0,
                max_polls=100
            )
        )
    )
    ```
  </Tab>

  <Tab title="Push Notifications">
    ```python Code
    from crewai import Agent
    from crewai.a2a import A2AConfig
    from crewai.a2a.updates import PushNotificationConfig

    agent = Agent(
        role="Research Coordinator",
        goal="Coordinate research tasks",
        backstory="Expert at delegation",
        llm="gpt-4o",
        a2a=A2AConfig(
            endpoint="https://research.example.com/.well-known/agent-card.json",
            updates=PushNotificationConfig(
                url=f"{base_url}/a2a/callback",
                token="your-validation-token",
                timeout=300.0
            )
        )
    )
    ```
  </Tab>
</Tabs>

## Best Practices

<CardGroup cols={2}>
@@ -12,7 +12,7 @@ dependencies = [
     "pytube~=15.0.0",
     "requests~=2.32.5",
     "docker~=7.1.0",
-    "crewai==1.7.2",
+    "crewai==1.8.0",
     "lancedb~=0.5.4",
     "tiktoken~=0.8.0",
     "beautifulsoup4~=4.13.4",
@@ -291,4 +291,4 @@ __all__ = [
     "ZapierActionTools",
 ]

-__version__ = "1.7.2"
+__version__ = "1.8.0"
@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"

 [project.optional-dependencies]
 tools = [
-    "crewai-tools==1.7.2",
+    "crewai-tools==1.8.0",
 ]
 embeddings = [
     "tiktoken~=0.8.0"
@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

 _suppress_pydantic_deprecation_warnings()

-__version__ = "1.7.2"
+__version__ = "1.8.0"
 _telemetry_submitted = False
@@ -3,4 +3,6 @@
 from crewai.a2a.config import A2AConfig


-__all__ = ["A2AConfig"]
+__all__ = [
+    "A2AConfig",
+]
@@ -5,11 +5,12 @@ This module is separate from experimental.a2a to avoid circular imports.

 from __future__ import annotations

-from typing import Annotated
+from typing import Annotated, Any, ClassVar

 from pydantic import (
     BaseModel,
     BeforeValidator,
+    ConfigDict,
     Field,
     HttpUrl,
     TypeAdapter,
@@ -18,6 +19,12 @@ from pydantic import (
 from crewai.a2a.auth.schemas import AuthScheme


+try:
+    from crewai.a2a.updates import UpdateConfig
+except ImportError:
+    UpdateConfig = Any  # type: ignore[misc,assignment]
+
+
 http_url_adapter = TypeAdapter(HttpUrl)

 Url = Annotated[
@@ -28,23 +35,32 @@ Url = Annotated[
 ]


+def _get_default_update_config() -> UpdateConfig:
+    from crewai.a2a.updates import StreamingConfig
+
+    return StreamingConfig()
+
+
 class A2AConfig(BaseModel):
     """Configuration for A2A protocol integration.

     Attributes:
         endpoint: A2A agent endpoint URL.
-        auth: Authentication scheme (Bearer, OAuth2, API Key, HTTP Basic/Digest).
-        timeout: Request timeout in seconds (default: 120).
-        max_turns: Maximum conversation turns with A2A agent (default: 10).
+        auth: Authentication scheme.
+        timeout: Request timeout in seconds.
+        max_turns: Maximum conversation turns with A2A agent.
         response_model: Optional Pydantic model for structured A2A agent responses.
-        fail_fast: If True, raise error when agent unreachable; if False, skip and continue (default: True).
-        trust_remote_completion_status: If True, return A2A agent's result directly when status is "completed"; if False, always ask server agent to respond (default: False).
+        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
+        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
+        updates: Update mechanism config.
     """

+    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
+
     endpoint: Url = Field(description="A2A agent endpoint URL")
     auth: AuthScheme | None = Field(
         default=None,
-        description="Authentication scheme (Bearer, OAuth2, API Key, HTTP Basic/Digest)",
+        description="Authentication scheme",
     )
     timeout: int = Field(default=120, description="Request timeout in seconds")
     max_turns: int = Field(
@@ -52,13 +68,17 @@ class A2AConfig(BaseModel):
     )
     response_model: type[BaseModel] | None = Field(
         default=None,
-        description="Optional Pydantic model for structured A2A agent responses. When specified, the A2A agent is expected to return JSON matching this schema.",
+        description="Optional Pydantic model for structured A2A agent responses",
     )
     fail_fast: bool = Field(
         default=True,
-        description="If True, raise an error immediately when the A2A agent is unreachable. If False, skip the A2A agent and continue execution.",
+        description="If True, raise error when agent unreachable; if False, skip",
     )
     trust_remote_completion_status: bool = Field(
         default=False,
-        description='If True, return the A2A agent\'s result directly when status is "completed" without asking the server agent to respond. If False, always ask the server agent to respond, allowing it to potentially delegate again.',
+        description="If True, return A2A result directly when completed",
     )
+    updates: UpdateConfig = Field(
+        default_factory=_get_default_update_config,
+        description="Update mechanism config",
+    )
lib/crewai/src/crewai/a2a/errors.py (new file, 7 lines)
@@ -0,0 +1,7 @@
"""A2A protocol error types."""

from a2a.client.errors import A2AClientTimeoutError


class A2APollingTimeoutError(A2AClientTimeoutError):
    """Raised when polling exceeds the configured timeout."""
lib/crewai/src/crewai/a2a/task_helpers.py (new file, 322 lines)
@@ -0,0 +1,322 @@
"""Helper functions for processing A2A task results."""

from __future__ import annotations

from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, TypedDict
import uuid

from a2a.types import (
    AgentCard,
    Message,
    Part,
    Role,
    Task,
    TaskArtifactUpdateEvent,
    TaskState,
    TaskStatusUpdateEvent,
    TextPart,
)
from typing_extensions import NotRequired

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent


if TYPE_CHECKING:
    from a2a.types import Task as A2ATask

SendMessageEvent = (
    tuple[Task, TaskStatusUpdateEvent | TaskArtifactUpdateEvent | None] | Message
)


TERMINAL_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.completed,
        TaskState.failed,
        TaskState.rejected,
        TaskState.canceled,
    }
)

ACTIONABLE_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.input_required,
        TaskState.auth_required,
    }
)


class TaskStateResult(TypedDict):
    """Result dictionary from processing A2A task state."""

    status: TaskState
    history: list[Message]
    result: NotRequired[str]
    error: NotRequired[str]
    agent_card: NotRequired[AgentCard]


def extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
    """Extract result parts from A2A task status message, history, and artifacts.

    Args:
        a2a_task: A2A Task object with status, history, and artifacts

    Returns:
        List of result text parts
    """
    result_parts: list[str] = []

    if a2a_task.status and a2a_task.status.message:
        msg = a2a_task.status.message
        result_parts.extend(
            part.root.text for part in msg.parts if part.root.kind == "text"
        )

    if not result_parts and a2a_task.history:
        for history_msg in reversed(a2a_task.history):
            if history_msg.role == Role.agent:
                result_parts.extend(
                    part.root.text
                    for part in history_msg.parts
                    if part.root.kind == "text"
                )
                break

    if a2a_task.artifacts:
        result_parts.extend(
            part.root.text
            for artifact in a2a_task.artifacts
            for part in artifact.parts
            if part.root.kind == "text"
        )

    return result_parts


def extract_error_message(a2a_task: A2ATask, default: str) -> str:
    """Extract error message from A2A task.

    Args:
        a2a_task: A2A Task object
        default: Default message if no error found

    Returns:
        Error message string
    """
    if a2a_task.status and a2a_task.status.message:
        msg = a2a_task.status.message
        if msg:
            for part in msg.parts:
                if part.root.kind == "text":
                    return str(part.root.text)
            return str(msg)

    if a2a_task.history:
        for history_msg in reversed(a2a_task.history):
            for part in history_msg.parts:
                if part.root.kind == "text":
                    return str(part.root.text)

    return default


def process_task_state(
    a2a_task: A2ATask,
    new_messages: list[Message],
    agent_card: AgentCard,
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    result_parts: list[str] | None = None,
) -> TaskStateResult | None:
    """Process A2A task state and return result dictionary.

    Shared logic for both polling and streaming handlers.

    Args:
        a2a_task: The A2A task to process
        new_messages: List to collect messages (modified in place)
        agent_card: The agent card
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        result_parts: Accumulated result parts (streaming passes accumulated,
            polling passes None to extract from task)

    Returns:
        Result dictionary if terminal/actionable state, None otherwise
    """
    should_extract = result_parts is None
    if result_parts is None:
        result_parts = []

    if a2a_task.status.state == TaskState.completed:
        if should_extract:
            extracted_parts = extract_task_result_parts(a2a_task)
            result_parts.extend(extracted_parts)
        if a2a_task.history:
            new_messages.extend(a2a_task.history)

        response_text = " ".join(result_parts) if result_parts else ""
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                status="completed",
                agent_role=agent_role,
            ),
        )

        return TaskStateResult(
            status=TaskState.completed,
            agent_card=agent_card,
            result=response_text,
            history=new_messages,
        )

    if a2a_task.status.state == TaskState.input_required:
        if a2a_task.history:
            new_messages.extend(a2a_task.history)

        response_text = extract_error_message(a2a_task, "Additional input required")
        if response_text and not a2a_task.history:
            agent_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=response_text))],
                context_id=a2a_task.context_id,
                task_id=a2a_task.id,
            )
            new_messages.append(agent_message)

        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                status="input_required",
                agent_role=agent_role,
            ),
        )

        return TaskStateResult(
            status=TaskState.input_required,
            error=response_text,
            history=new_messages,
            agent_card=agent_card,
        )

    if a2a_task.status.state in {TaskState.failed, TaskState.rejected}:
        error_msg = extract_error_message(a2a_task, "Task failed without error message")
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )

    if a2a_task.status.state == TaskState.auth_required:
        error_msg = extract_error_message(a2a_task, "Authentication required")
        return TaskStateResult(
            status=TaskState.auth_required,
            error=error_msg,
            history=new_messages,
        )

    if a2a_task.status.state == TaskState.canceled:
        error_msg = extract_error_message(a2a_task, "Task was canceled")
        return TaskStateResult(
            status=TaskState.canceled,
            error=error_msg,
            history=new_messages,
        )

    return None


async def send_message_and_get_task_id(
    event_stream: AsyncIterator[SendMessageEvent],
    new_messages: list[Message],
    agent_card: AgentCard,
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
) -> str | TaskStateResult:
    """Send message and process initial response.

    Handles the common pattern of sending a message and either:
    - Getting an immediate Message response (task completed synchronously)
    - Getting a Task that needs polling/waiting for completion

    Args:
        event_stream: Async iterator from client.send_message()
        new_messages: List to collect messages (modified in place)
        agent_card: The agent card
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging

    Returns:
        Task ID string if agent needs polling/waiting, or TaskStateResult if done.
    """
    try:
        async for event in event_stream:
            if isinstance(event, Message):
                new_messages.append(event)
                result_parts = [
                    part.root.text for part in event.parts if part.root.kind == "text"
                ]
                response_text = " ".join(result_parts) if result_parts else ""

                crewai_event_bus.emit(
                    None,
                    A2AResponseReceivedEvent(
                        response=response_text,
                        turn_number=turn_number,
                        is_multiturn=is_multiturn,
                        status="completed",
                        agent_role=agent_role,
                    ),
                )

                return TaskStateResult(
                    status=TaskState.completed,
                    result=response_text,
                    history=new_messages,
                    agent_card=agent_card,
                )

            if isinstance(event, tuple):
                a2a_task, _ = event

                if a2a_task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
                    result = process_task_state(
                        a2a_task=a2a_task,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        turn_number=turn_number,
                        is_multiturn=is_multiturn,
                        agent_role=agent_role,
                    )
                    if result:
                        return result

                return a2a_task.id

        return TaskStateResult(
            status=TaskState.failed,
            error="No task ID received from initial message",
            history=new_messages,
        )

    finally:
        aclose = getattr(event_stream, "aclose", None)
        if aclose:
            await aclose()
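For orientation, a hedged sketch of how the update handlers below consume this helper; `client`, `message`, and `agent_card` are assumed to already exist in the caller:

```python
# Sketch: branch on the str | TaskStateResult union returned by
# send_message_and_get_task_id.
new_messages: list[Message] = []
result_or_task_id = await send_message_and_get_task_id(
    event_stream=client.send_message(message),
    new_messages=new_messages,
    agent_card=agent_card,
    turn_number=1,
    is_multiturn=False,
    agent_role=None,
)
if isinstance(result_or_task_id, str):
    task_id = result_or_task_id   # task still running: poll or await a push
else:
    result = result_or_task_id    # already terminal: a TaskStateResult
```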
@@ -27,3 +27,14 @@ UNAVAILABLE_AGENTS_NOTICE_TEMPLATE: Final[Template] = Template(
    " $unavailable_agents"
    "\n</A2A_AGENTS_STATUS>\n"
)
REMOTE_AGENT_COMPLETED_NOTICE: Final[str] = """
<REMOTE_AGENT_STATUS>
STATUS: COMPLETED
The remote agent has finished processing your request. Their response is in the conversation history above.
You MUST now:
1. Extract the answer from the conversation history
2. Set is_a2a=false
3. Return the answer as your final message
DO NOT send another request - the task is already done.
</REMOTE_AGENT_STATUS>
"""
@@ -4,6 +4,16 @@ from typing import Any, Literal, Protocol, TypedDict, runtime_checkable

from typing_extensions import NotRequired

from crewai.a2a.updates import (
    PollingConfig,
    PollingHandler,
    PushNotificationConfig,
    PushNotificationHandler,
    StreamingConfig,
    StreamingHandler,
    UpdateConfig,
)


@runtime_checkable
class AgentResponseProtocol(Protocol):
@@ -36,3 +46,16 @@ class PartsDict(TypedDict):

    text: str
    metadata: NotRequired[PartsMetadataDict]


PollingHandlerType = type[PollingHandler]
StreamingHandlerType = type[StreamingHandler]
PushNotificationHandlerType = type[PushNotificationHandler]

HandlerType = PollingHandlerType | StreamingHandlerType | PushNotificationHandlerType

HANDLER_REGISTRY: dict[type[UpdateConfig], HandlerType] = {
    PollingConfig: PollingHandler,
    StreamingConfig: StreamingHandler,
    PushNotificationConfig: PushNotificationHandler,
}
lib/crewai/src/crewai/a2a/updates/__init__.py (new file, 35 lines)
@@ -0,0 +1,35 @@
"""A2A update mechanism configuration types."""

from crewai.a2a.updates.base import (
    BaseHandlerKwargs,
    PollingHandlerKwargs,
    PushNotificationHandlerKwargs,
    PushNotificationResultStore,
    StreamingHandlerKwargs,
    UpdateHandler,
)
from crewai.a2a.updates.polling.config import PollingConfig
from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler
from crewai.a2a.updates.streaming.config import StreamingConfig
from crewai.a2a.updates.streaming.handler import StreamingHandler


UpdateConfig = PollingConfig | StreamingConfig | PushNotificationConfig

__all__ = [
    "BaseHandlerKwargs",
    "PollingConfig",
    "PollingHandler",
    "PollingHandlerKwargs",
    "PushNotificationConfig",
    "PushNotificationHandler",
    "PushNotificationHandlerKwargs",
    "PushNotificationResultStore",
    "StreamingConfig",
    "StreamingHandler",
    "StreamingHandlerKwargs",
    "UpdateConfig",
    "UpdateHandler",
]
lib/crewai/src/crewai/a2a/updates/base.py (new file, 131 lines)
@@ -0,0 +1,131 @@
"""Base types for A2A update mechanism handlers."""

from __future__ import annotations

from typing import TYPE_CHECKING, Any, Protocol, TypedDict

from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema


if TYPE_CHECKING:
    from a2a.client import Client
    from a2a.types import AgentCard, Message, Task

    from crewai.a2a.task_helpers import TaskStateResult
    from crewai.a2a.updates.push_notifications.config import PushNotificationConfig


class BaseHandlerKwargs(TypedDict, total=False):
    """Base kwargs shared by all handlers."""

    turn_number: int
    is_multiturn: bool
    agent_role: str | None


class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for polling handler."""

    polling_interval: float
    polling_timeout: float
    endpoint: str
    agent_branch: Any
    history_length: int
    max_polls: int | None


class StreamingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for streaming handler."""

    context_id: str | None
    task_id: str | None


class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for push notification handler."""

    config: PushNotificationConfig
    result_store: PushNotificationResultStore
    polling_timeout: float
    polling_interval: float
    agent_branch: Any


class PushNotificationResultStore(Protocol):
    """Protocol for storing and retrieving push notification results.

    This protocol defines the interface for a result store that the
    PushNotificationHandler uses to wait for task completion.
    """

    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        source_type: Any,
        handler: GetCoreSchemaHandler,
    ) -> CoreSchema:
        return core_schema.any_schema()

    async def wait_for_result(
        self,
        task_id: str,
        timeout: float,
        poll_interval: float = 1.0,
    ) -> Task | None:
        """Wait for a task result to be available.

        Args:
            task_id: The task ID to wait for.
            timeout: Max seconds to wait before returning None.
            poll_interval: Seconds between polling attempts.

        Returns:
            The completed Task object, or None if timeout.
        """
        ...

    async def get_result(self, task_id: str) -> Task | None:
        """Get a task result if available.

        Args:
            task_id: The task ID to retrieve.

        Returns:
            The Task object if available, None otherwise.
        """
        ...

    async def store_result(self, task: Task) -> None:
        """Store a task result.

        Args:
            task: The Task object to store.
        """
        ...


class UpdateHandler(Protocol):
    """Protocol for A2A update mechanism handlers."""

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Any,
    ) -> TaskStateResult:
        """Execute the update mechanism and return result.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages (modified in place).
            agent_card: The agent card.
            **kwargs: Additional handler-specific parameters.

        Returns:
            Result dictionary with status, result/error, and history.
        """
        ...
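The result-store protocol is structural, so any object with matching methods qualifies. A minimal in-memory implementation (a sketch, not part of this commit; the class name is illustrative) might look like:

```python
import asyncio
import time

from a2a.types import Task


class InMemoryResultStore:
    """Keeps completed tasks in a dict keyed by task ID."""

    def __init__(self) -> None:
        self._results: dict[str, Task] = {}

    async def store_result(self, task: Task) -> None:
        # Called by the webhook endpoint when a notification arrives.
        self._results[task.id] = task

    async def get_result(self, task_id: str) -> Task | None:
        return self._results.get(task_id)

    async def wait_for_result(
        self, task_id: str, timeout: float, poll_interval: float = 1.0
    ) -> Task | None:
        # Poll the dict until the task shows up or the timeout expires.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if (task := self._results.get(task_id)) is not None:
                return task
            await asyncio.sleep(poll_interval)
        return None
```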
lib/crewai/src/crewai/a2a/updates/polling/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Polling update mechanism module."""
lib/crewai/src/crewai/a2a/updates/polling/config.py (new file, 25 lines)
@@ -0,0 +1,25 @@
"""Polling update mechanism configuration."""

from __future__ import annotations

from pydantic import BaseModel, Field


class PollingConfig(BaseModel):
    """Configuration for polling-based task updates.

    Attributes:
        interval: Seconds between poll attempts.
        timeout: Max seconds to poll before raising timeout error.
        max_polls: Max number of poll attempts.
        history_length: Number of messages to retrieve per poll.
    """

    interval: float = Field(
        default=2.0, gt=0, description="Seconds between poll attempts"
    )
    timeout: float | None = Field(default=None, gt=0, description="Max seconds to poll")
    max_polls: int | None = Field(default=None, gt=0, description="Max poll attempts")
    history_length: int = Field(
        default=100, gt=0, description="Messages to retrieve per poll"
    )
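For instance, a poll loop can be bounded by both wall-clock time and attempt count (a small usage sketch):

```python
from crewai.a2a.updates import PollingConfig

# Give up after 60 seconds or 20 attempts, whichever comes first.
cfg = PollingConfig(interval=3.0, timeout=60.0, max_polls=20)
```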
lib/crewai/src/crewai/a2a/updates/polling/handler.py (new file, 246 lines)
@@ -0,0 +1,246 @@
"""Polling update mechanism handler."""

from __future__ import annotations

import asyncio
import time
from typing import TYPE_CHECKING, Any
import uuid

from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
    AgentCard,
    Message,
    Part,
    Role,
    TaskQueryParams,
    TaskState,
    TextPart,
)
from typing_extensions import Unpack

from crewai.a2a.errors import A2APollingTimeoutError
from crewai.a2a.task_helpers import (
    ACTIONABLE_STATES,
    TERMINAL_STATES,
    TaskStateResult,
    process_task_state,
    send_message_and_get_task_id,
)
from crewai.a2a.updates.base import PollingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2APollingStartedEvent,
    A2APollingStatusEvent,
    A2AResponseReceivedEvent,
)


if TYPE_CHECKING:
    from a2a.types import Task as A2ATask


async def _poll_task_until_complete(
    client: Client,
    task_id: str,
    polling_interval: float,
    polling_timeout: float,
    agent_branch: Any | None = None,
    history_length: int = 100,
    max_polls: int | None = None,
) -> A2ATask:
    """Poll task status until terminal state reached.

    Args:
        client: A2A client instance
        task_id: Task ID to poll
        polling_interval: Seconds between poll attempts
        polling_timeout: Max seconds before timeout
        agent_branch: Agent tree branch for logging
        history_length: Number of messages to retrieve per poll
        max_polls: Max number of poll attempts (None = unlimited)

    Returns:
        Final task object in terminal state

    Raises:
        A2APollingTimeoutError: If polling exceeds timeout or max_polls
    """
    start_time = time.monotonic()
    poll_count = 0

    while True:
        poll_count += 1
        task = await client.get_task(
            TaskQueryParams(id=task_id, history_length=history_length)
        )

        elapsed = time.monotonic() - start_time
        crewai_event_bus.emit(
            agent_branch,
            A2APollingStatusEvent(
                task_id=task_id,
                state=str(task.status.state.value) if task.status.state else "unknown",
                elapsed_seconds=elapsed,
                poll_count=poll_count,
            ),
        )

        if task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
            return task

        if elapsed > polling_timeout:
            raise A2APollingTimeoutError(
                f"Polling timeout after {polling_timeout}s ({poll_count} polls)"
            )

        if max_polls and poll_count >= max_polls:
            raise A2APollingTimeoutError(
                f"Max polls ({max_polls}) exceeded after {elapsed:.1f}s"
            )

        await asyncio.sleep(polling_interval)


class PollingHandler:
    """Polling-based update handler."""

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[PollingHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using polling for updates.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages.
            agent_card: The agent card.
            **kwargs: Polling-specific parameters.

        Returns:
            Dictionary with status, result/error, and history.
        """
        polling_interval = kwargs.get("polling_interval", 2.0)
        polling_timeout = kwargs.get("polling_timeout", 300.0)
        endpoint = kwargs.get("endpoint", "")
        agent_branch = kwargs.get("agent_branch")
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")
        history_length = kwargs.get("history_length", 100)
        max_polls = kwargs.get("max_polls")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")

        try:
            result_or_task_id = await send_message_and_get_task_id(
                event_stream=client.send_message(message),
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
            )

            if not isinstance(result_or_task_id, str):
                return result_or_task_id

            task_id = result_or_task_id

            crewai_event_bus.emit(
                agent_branch,
                A2APollingStartedEvent(
                    task_id=task_id,
                    polling_interval=polling_interval,
                    endpoint=endpoint,
                ),
            )

            final_task = await _poll_task_until_complete(
                client=client,
                task_id=task_id,
                polling_interval=polling_interval,
                polling_timeout=polling_timeout,
                agent_branch=agent_branch,
                history_length=history_length,
                max_polls=max_polls,
            )

            result = process_task_state(
                a2a_task=final_task,
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
            )
            if result:
                return result

            return TaskStateResult(
                status=TaskState.failed,
                error=f"Unexpected task state: {final_task.status.state}",
                history=new_messages,
            )

        except A2APollingTimeoutError as e:
            error_msg = str(e)

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    is_multiturn=is_multiturn,
                    status="failed",
                    agent_role=agent_role,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except A2AClientHTTPError as e:
            error_msg = f"HTTP Error {e.status_code}: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    is_multiturn=is_multiturn,
                    status="failed",
                    agent_role=agent_role,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
lib/crewai/src/crewai/a2a/updates/push_notifications/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Push notification update mechanism module."""
lib/crewai/src/crewai/a2a/updates/push_notifications/config.py (new file, 38 lines)
@@ -0,0 +1,38 @@
"""Push notification update mechanism configuration."""

from __future__ import annotations

from a2a.types import PushNotificationAuthenticationInfo
from pydantic import AnyHttpUrl, BaseModel, Field

from crewai.a2a.updates.base import PushNotificationResultStore


class PushNotificationConfig(BaseModel):
    """Configuration for webhook-based task updates.

    Attributes:
        url: Callback URL where agent sends push notifications.
        id: Unique identifier for this config.
        token: Token to validate incoming notifications.
        authentication: Auth info for agent to use when calling webhook.
        timeout: Max seconds to wait for task completion.
        interval: Seconds between result polling attempts.
        result_store: Store for receiving push notification results.
    """

    url: AnyHttpUrl = Field(description="Callback URL for push notifications")
    id: str | None = Field(default=None, description="Unique config identifier")
    token: str | None = Field(default=None, description="Validation token")
    authentication: PushNotificationAuthenticationInfo | None = Field(
        default=None, description="Auth info for agent to use when calling webhook"
    )
    timeout: float | None = Field(
        default=300.0, gt=0, description="Max seconds to wait for task completion"
    )
    interval: float = Field(
        default=2.0, gt=0, description="Seconds between result polling attempts"
    )
    result_store: PushNotificationResultStore | None = Field(
        default=None, description="Result store for push notification handling"
    )
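A typical configuration pairs the callback URL with a validation token; the `result_store` wired here is what the handler later waits on. A small sketch (URL and token are illustrative):

```python
from crewai.a2a.updates import PushNotificationConfig

cfg = PushNotificationConfig(
    url="https://your-server.example.com/a2a/callback",
    token="your-validation-token",
    timeout=300.0,
)
```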
lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py (new file, 220 lines)
@@ -0,0 +1,220 @@
"""Push notification (webhook) update mechanism handler."""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any
import uuid

from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
    AgentCard,
    Message,
    Part,
    Role,
    TaskState,
    TextPart,
)
from typing_extensions import Unpack

from crewai.a2a.task_helpers import (
    TaskStateResult,
    process_task_state,
    send_message_and_get_task_id,
)
from crewai.a2a.updates.base import (
    PushNotificationHandlerKwargs,
    PushNotificationResultStore,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2APushNotificationRegisteredEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,
)


if TYPE_CHECKING:
    from a2a.types import Task as A2ATask


logger = logging.getLogger(__name__)


async def _wait_for_push_result(
    task_id: str,
    result_store: PushNotificationResultStore,
    timeout: float,
    poll_interval: float,
    agent_branch: Any | None = None,
) -> A2ATask | None:
    """Wait for push notification result.

    Args:
        task_id: Task ID to wait for.
        result_store: Store to retrieve results from.
        timeout: Max seconds to wait.
        poll_interval: Seconds between polling attempts.
        agent_branch: Agent tree branch for logging.

    Returns:
        Final task object, or None if timeout.
    """
    task = await result_store.wait_for_result(
        task_id=task_id,
        timeout=timeout,
        poll_interval=poll_interval,
    )

    if task is None:
        crewai_event_bus.emit(
            agent_branch,
            A2APushNotificationTimeoutEvent(
                task_id=task_id,
                timeout_seconds=timeout,
            ),
        )

    return task


class PushNotificationHandler:
    """Push notification (webhook) based update handler."""

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[PushNotificationHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using push notifications for updates.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages.
            agent_card: The agent card.
            **kwargs: Push notification-specific parameters.

        Returns:
            Dictionary with status, result/error, and history.

        Raises:
            ValueError: If result_store or config not provided.
        """
        config = kwargs.get("config")
        result_store = kwargs.get("result_store")
        polling_timeout = kwargs.get("polling_timeout", 300.0)
        polling_interval = kwargs.get("polling_interval", 2.0)
        agent_branch = kwargs.get("agent_branch")
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")

        if config is None:
            return TaskStateResult(
                status=TaskState.failed,
                error="PushNotificationConfig is required for push notification handler",
                history=new_messages,
            )

        if result_store is None:
            return TaskStateResult(
                status=TaskState.failed,
                error="PushNotificationResultStore is required for push notification handler",
                history=new_messages,
            )

        try:
            result_or_task_id = await send_message_and_get_task_id(
                event_stream=client.send_message(message),
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
            )

            if not isinstance(result_or_task_id, str):
                return result_or_task_id

            task_id = result_or_task_id

            crewai_event_bus.emit(
                agent_branch,
                A2APushNotificationRegisteredEvent(
                    task_id=task_id,
                    callback_url=str(config.url),
                ),
            )

            logger.debug(
                "Push notification callback for task %s configured at %s (via initial request)",
                task_id,
                config.url,
            )

            final_task = await _wait_for_push_result(
                task_id=task_id,
                result_store=result_store,
                timeout=polling_timeout,
                poll_interval=polling_interval,
                agent_branch=agent_branch,
            )

            if final_task is None:
                return TaskStateResult(
                    status=TaskState.failed,
                    error=f"Push notification timeout after {polling_timeout}s",
                    history=new_messages,
                )

            result = process_task_state(
                a2a_task=final_task,
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
            )
            if result:
                return result

            return TaskStateResult(
                status=TaskState.failed,
                error=f"Unexpected task state: {final_task.status.state}",
                history=new_messages,
            )

        except A2AClientHTTPError as e:
            error_msg = f"HTTP Error {e.status_code}: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    is_multiturn=is_multiturn,
                    status="failed",
                    agent_role=agent_role,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
lib/crewai/src/crewai/a2a/updates/streaming/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Streaming update mechanism module."""
lib/crewai/src/crewai/a2a/updates/streaming/config.py (new file, 9 lines)
@@ -0,0 +1,9 @@
"""Streaming update mechanism configuration."""

from __future__ import annotations

from pydantic import BaseModel


class StreamingConfig(BaseModel):
    """Configuration for SSE-based task updates."""
lib/crewai/src/crewai/a2a/updates/streaming/handler.py (new file, 149 lines)
@@ -0,0 +1,149 @@
"""Streaming (SSE) update mechanism handler."""

from __future__ import annotations

import uuid

from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
    AgentCard,
    Message,
    Part,
    Role,
    TaskArtifactUpdateEvent,
    TaskState,
    TaskStatusUpdateEvent,
    TextPart,
)
from typing_extensions import Unpack

from crewai.a2a.task_helpers import (
    ACTIONABLE_STATES,
    TERMINAL_STATES,
    TaskStateResult,
    process_task_state,
)
from crewai.a2a.updates.base import StreamingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent


class StreamingHandler:
    """SSE streaming-based update handler."""

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[StreamingHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using SSE streaming for updates.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages.
            agent_card: The agent card.
            **kwargs: Streaming-specific parameters.

        Returns:
            Dictionary with status, result/error, and history.
        """
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")

        result_parts: list[str] = []
        final_result: TaskStateResult | None = None
        event_stream = client.send_message(message)

        try:
            async for event in event_stream:
                if isinstance(event, Message):
                    new_messages.append(event)
                    for part in event.parts:
                        if part.root.kind == "text":
                            text = part.root.text
                            result_parts.append(text)

                elif isinstance(event, tuple):
                    a2a_task, update = event

                    if isinstance(update, TaskArtifactUpdateEvent):
                        artifact = update.artifact
                        result_parts.extend(
                            part.root.text
                            for part in artifact.parts
                            if part.root.kind == "text"
                        )

                    is_final_update = False
                    if isinstance(update, TaskStatusUpdateEvent):
                        is_final_update = update.final

                    if (
                        not is_final_update
                        and a2a_task.status.state
                        not in TERMINAL_STATES | ACTIONABLE_STATES
                    ):
                        continue

                    final_result = process_task_state(
                        a2a_task=a2a_task,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        turn_number=turn_number,
                        is_multiturn=is_multiturn,
                        agent_role=agent_role,
                        result_parts=result_parts,
                    )
                    if final_result:
                        break

        except A2AClientHTTPError as e:
            error_msg = f"HTTP Error {e.status_code}: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                None,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    is_multiturn=is_multiturn,
                    status="failed",
                    agent_role=agent_role,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        finally:
            aclose = getattr(event_stream, "aclose", None)
            if aclose:
                await aclose()

        if final_result:
            return final_result

        return TaskStateResult(
            status=TaskState.completed,
            result=" ".join(result_parts) if result_parts else "",
            history=new_messages,
            agent_card=agent_card,
        )
@@ -10,16 +10,13 @@ import time
 from typing import TYPE_CHECKING, Any
 import uuid

-from a2a.client import Client, ClientConfig, ClientFactory
-from a2a.client.errors import A2AClientHTTPError
+from a2a.client import A2AClientHTTPError, Client, ClientConfig, ClientFactory
 from a2a.types import (
     AgentCard,
     Message,
     Part,
+    PushNotificationConfig as A2APushNotificationConfig,
     Role,
-    TaskArtifactUpdateEvent,
     TaskState,
-    TaskStatusUpdateEvent,
     TextPart,
     TransportProtocol,
 )
@@ -36,24 +33,49 @@ from crewai.a2a.auth.utils import (
     validate_auth_against_agent_card,
 )
 from crewai.a2a.config import A2AConfig
-from crewai.a2a.types import PartsDict, PartsMetadataDict
+from crewai.a2a.task_helpers import TaskStateResult
+from crewai.a2a.types import (
+    HANDLER_REGISTRY,
+    HandlerType,
+    PartsDict,
+    PartsMetadataDict,
+)
+from crewai.a2a.updates import (
+    PollingConfig,
+    PushNotificationConfig,
+    StreamingHandler,
+    UpdateConfig,
+)
 from crewai.events.event_bus import crewai_event_bus
 from crewai.events.types.a2a_events import (
     A2AConversationStartedEvent,
     A2ADelegationCompletedEvent,
     A2ADelegationStartedEvent,
     A2AMessageSentEvent,
     A2AResponseReceivedEvent,
 )
 from crewai.types.utils import create_literals_from_strings


 if TYPE_CHECKING:
-    from a2a.types import Message, Task as A2ATask
+    from a2a.types import Message

     from crewai.a2a.auth.schemas import AuthScheme


+def get_handler(config: UpdateConfig | None) -> HandlerType:
+    """Get the handler class for a given update config.
+
+    Args:
+        config: Update mechanism configuration.
+
+    Returns:
+        Handler class for the config type, defaults to StreamingHandler.
+    """
+    if config is None:
+        return StreamingHandler
+    return HANDLER_REGISTRY.get(type(config), StreamingHandler)
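Handler selection is then a plain type lookup; a quick sketch of the dispatch behavior:

```python
from crewai.a2a.updates import PollingConfig, PollingHandler, StreamingHandler

assert get_handler(None) is StreamingHandler          # streaming is the default
assert get_handler(PollingConfig()) is PollingHandler
```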
 @lru_cache()
 def _fetch_agent_card_cached(
     endpoint: str,
@@ -61,24 +83,14 @@
     timeout: int,
     _ttl_hash: int,
 ) -> AgentCard:
-    """Cached version of fetch_agent_card with auth support.
-
-    Args:
-        endpoint: A2A agent endpoint URL
-        auth_hash: Hash of the auth object
-        timeout: Request timeout
-        _ttl_hash: Time-based hash for cache invalidation
-
-    Returns:
-        Cached AgentCard
-    """
+    """Cached sync version of fetch_agent_card."""
     auth = _auth_store.get(auth_hash)

     loop = asyncio.new_event_loop()
     asyncio.set_event_loop(loop)
     try:
         return loop.run_until_complete(
-            _fetch_agent_card_async(endpoint=endpoint, auth=auth, timeout=timeout)
+            _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
         )
     finally:
         loop.close()
@@ -128,47 +140,74 @@ def fetch_agent_card(
     asyncio.set_event_loop(loop)
     try:
         return loop.run_until_complete(
-            _fetch_agent_card_async(endpoint=endpoint, auth=auth, timeout=timeout)
+            afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout)
         )
     finally:
         loop.close()


+async def afetch_agent_card(
+    endpoint: str,
+    auth: AuthScheme | None = None,
+    timeout: int = 30,
+    use_cache: bool = True,
+) -> AgentCard:
+    """Fetch AgentCard from an A2A endpoint asynchronously.
+
+    Native async implementation. Use this when running in an async context.
+
+    Args:
+        endpoint: A2A agent endpoint URL (AgentCard URL).
+        auth: Optional AuthScheme for authentication.
+        timeout: Request timeout in seconds.
+        use_cache: Whether to use caching (default True).
+
+    Returns:
+        AgentCard object with agent capabilities and skills.
+
+    Raises:
+        httpx.HTTPStatusError: If the request fails.
+        A2AClientHTTPError: If authentication fails.
+    """
+    if use_cache:
+        if auth:
+            auth_data = auth.model_dump_json(
+                exclude={
+                    "_access_token",
+                    "_token_expires_at",
+                    "_refresh_token",
+                    "_authorization_callback",
+                }
+            )
+            auth_hash = hash((type(auth).__name__, auth_data))
+        else:
+            auth_hash = 0
+        _auth_store[auth_hash] = auth
+        agent_card: AgentCard = await _afetch_agent_card_cached(
+            endpoint, auth_hash, timeout
+        )
+        return agent_card
+
+    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
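In an async context the card can be fetched directly, without the event-loop juggling the sync wrapper performs. A minimal sketch (the endpoint is illustrative):

```python
import asyncio


async def main() -> None:
    card = await afetch_agent_card(
        endpoint="https://research.example.com/.well-known/agent-card.json",
        timeout=30,
    )
    print(card.name)


asyncio.run(main())
```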
@cached(ttl=300, serializer=PickleSerializer()) # type: ignore[untyped-decorator]
|
||||
async def _fetch_agent_card_async_cached(
|
||||
async def _afetch_agent_card_cached(
|
||||
endpoint: str,
|
||||
auth_hash: int,
|
||||
timeout: int,
|
||||
) -> AgentCard:
|
||||
"""Cached async implementation of AgentCard fetching.
|
||||
|
||||
Args:
|
||||
endpoint: A2A agent endpoint URL
|
||||
auth_hash: Hash of the auth object
|
||||
timeout: Request timeout in seconds
|
||||
|
||||
Returns:
|
||||
Cached AgentCard object
|
||||
"""
|
||||
"""Cached async implementation of AgentCard fetching."""
|
||||
auth = _auth_store.get(auth_hash)
|
||||
return await _fetch_agent_card_async(endpoint=endpoint, auth=auth, timeout=timeout)
|
||||
return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
|
||||
|
||||
|
||||
async def _fetch_agent_card_async(
|
||||
async def _afetch_agent_card_impl(
|
||||
endpoint: str,
|
||||
auth: AuthScheme | None,
|
||||
timeout: int,
|
||||
) -> AgentCard:
|
||||
"""Async implementation of AgentCard fetching.
|
||||
|
||||
Args:
|
||||
endpoint: A2A agent endpoint URL
|
||||
auth: Optional AuthScheme for authentication
timeout: Request timeout in seconds

Returns:
AgentCard object
"""
"""Internal async implementation of AgentCard fetching."""
if "/.well-known/agent-card.json" in endpoint:
base_url = endpoint.replace("/.well-known/agent-card.json", "")
agent_card_path = "/.well-known/agent-card.json"
@@ -235,49 +274,116 @@ def execute_a2a_delegation(
agent_branch: Any | None = None,
response_model: type[BaseModel] | None = None,
turn_number: int | None = None,
) -> dict[str, Any]:
"""Execute a task delegation to a remote A2A agent with multi-turn support.
updates: UpdateConfig | None = None,
) -> TaskStateResult:
"""Execute a task delegation to a remote A2A agent synchronously.

Handles:
- AgentCard discovery
- Authentication setup
- Message creation and sending
- Response parsing
- Multi-turn conversations
This is the sync wrapper around aexecute_a2a_delegation. For async contexts,
use aexecute_a2a_delegation directly.

Args:
endpoint: A2A agent endpoint URL (AgentCard URL)
auth: Optional AuthScheme for authentication (Bearer, OAuth2, API Key, HTTP Basic/Digest)
timeout: Request timeout in seconds
task_description: The task to delegate
context: Optional context information
context_id: Context ID for correlating messages/tasks
task_id: Specific task identifier
reference_task_ids: List of related task IDs
metadata: Additional metadata (external_id, request_id, etc.)
extensions: Protocol extensions for custom fields
conversation_history: Previous Message objects from conversation
agent_id: Agent identifier for logging
agent_role: Role of the CrewAI agent delegating the task
agent_branch: Optional agent tree branch for logging
response_model: Optional Pydantic model for structured outputs
turn_number: Optional turn number for multi-turn conversations
endpoint: A2A agent endpoint URL.
auth: Optional AuthScheme for authentication.
timeout: Request timeout in seconds.
task_description: The task to delegate.
context: Optional context information.
context_id: Context ID for correlating messages/tasks.
task_id: Specific task identifier.
reference_task_ids: List of related task IDs.
metadata: Additional metadata.
extensions: Protocol extensions for custom fields.
conversation_history: Previous Message objects from conversation.
agent_id: Agent identifier for logging.
agent_role: Role of the CrewAI agent delegating the task.
agent_branch: Optional agent tree branch for logging.
response_model: Optional Pydantic model for structured outputs.
turn_number: Optional turn number for multi-turn conversations.
updates: Update mechanism config from A2AConfig.updates.

Returns:
Dictionary with:
- status: "completed", "input_required", "failed", etc.
- result: Result string (if completed)
- error: Error message (if failed)
- history: List of new Message objects from this exchange

Raises:
ImportError: If a2a-sdk is not installed
TaskStateResult with status, result/error, history, and agent_card.
"""
is_multiturn = bool(conversation_history and len(conversation_history) > 0)
if turn_number is None:
turn_number = (
len([m for m in (conversation_history or []) if m.role == Role.user]) + 1
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(
aexecute_a2a_delegation(
endpoint=endpoint,
auth=auth,
timeout=timeout,
task_description=task_description,
context=context,
context_id=context_id,
task_id=task_id,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history,
agent_id=agent_id,
agent_role=agent_role,
agent_branch=agent_branch,
response_model=response_model,
turn_number=turn_number,
updates=updates,
)
)
finally:
loop.close()
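# Usage sketch (illustrative; the endpoint URL and task text are placeholder
# assumptions): the sync wrapper above owns a private event loop, so it must
# only be called from synchronous code. Async callers (Crew.akickoff(),
# agent.aexecute_task()) should await aexecute_a2a_delegation directly
# instead of nesting event loops.
def _example_sync_delegation() -> None:
    result = execute_a2a_delegation(
        endpoint="https://agent.example.com/.well-known/agent-card.json",
        auth=None,
        timeout=120,
        task_description="Summarize the latest research findings",
    )
    print(result["status"], result.get("result"))


async def _example_async_delegation() -> None:
    result = await aexecute_a2a_delegation(
        endpoint="https://agent.example.com/.well-known/agent-card.json",
        auth=None,
        timeout=120,
        task_description="Summarize the latest research findings",
    )
    print(result["status"], result.get("result"))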


async def aexecute_a2a_delegation(
endpoint: str,
auth: AuthScheme | None,
timeout: int,
task_description: str,
context: str | None = None,
context_id: str | None = None,
task_id: str | None = None,
reference_task_ids: list[str] | None = None,
metadata: dict[str, Any] | None = None,
extensions: dict[str, Any] | None = None,
conversation_history: list[Message] | None = None,
agent_id: str | None = None,
agent_role: Role | None = None,
agent_branch: Any | None = None,
response_model: type[BaseModel] | None = None,
turn_number: int | None = None,
updates: UpdateConfig | None = None,
) -> TaskStateResult:
"""Execute a task delegation to a remote A2A agent asynchronously.

Native async implementation with multi-turn support. Use this when running
in an async context (e.g., with Crew.akickoff() or agent.aexecute_task()).

Args:
endpoint: A2A agent endpoint URL.
auth: Optional AuthScheme for authentication.
timeout: Request timeout in seconds.
task_description: The task to delegate.
context: Optional context information.
context_id: Context ID for correlating messages/tasks.
task_id: Specific task identifier.
reference_task_ids: List of related task IDs.
metadata: Additional metadata.
extensions: Protocol extensions for custom fields.
conversation_history: Previous Message objects from conversation.
agent_id: Agent identifier for logging.
agent_role: Role of the CrewAI agent delegating the task.
agent_branch: Optional agent tree branch for logging.
response_model: Optional Pydantic model for structured outputs.
turn_number: Optional turn number for multi-turn conversations.
updates: Update mechanism config from A2AConfig.updates.

Returns:
TaskStateResult with status, result/error, history, and agent_card.
"""
if conversation_history is None:
conversation_history = []

is_multiturn = len(conversation_history) > 0
if turn_number is None:
turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1

crewai_event_bus.emit(
agent_branch,
A2ADelegationStartedEvent(
@@ -289,47 +395,41 @@ def execute_a2a_delegation(
),
)

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
result = loop.run_until_complete(
_execute_a2a_delegation_async(
endpoint=endpoint,
auth=auth,
timeout=timeout,
task_description=task_description,
context=context,
context_id=context_id,
task_id=task_id,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history or [],
is_multiturn=is_multiturn,
turn_number=turn_number,
agent_branch=agent_branch,
agent_id=agent_id,
agent_role=agent_role,
response_model=response_model,
)
)
result = await _aexecute_a2a_delegation_impl(
endpoint=endpoint,
auth=auth,
timeout=timeout,
task_description=task_description,
context=context,
context_id=context_id,
task_id=task_id,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history,
is_multiturn=is_multiturn,
turn_number=turn_number,
agent_branch=agent_branch,
agent_id=agent_id,
agent_role=agent_role,
response_model=response_model,
updates=updates,
)

crewai_event_bus.emit(
agent_branch,
A2ADelegationCompletedEvent(
status=result["status"],
result=result.get("result"),
error=result.get("error"),
is_multiturn=is_multiturn,
),
)
crewai_event_bus.emit(
agent_branch,
A2ADelegationCompletedEvent(
status=result["status"],
result=result.get("result"),
error=result.get("error"),
is_multiturn=is_multiturn,
),
)

return result
finally:
loop.close()
return result


async def _execute_a2a_delegation_async(
async def _aexecute_a2a_delegation_impl(
endpoint: str,
auth: AuthScheme | None,
timeout: int,
@@ -341,37 +441,15 @@ async def _execute_a2a_delegation_async(
metadata: dict[str, Any] | None,
extensions: dict[str, Any] | None,
conversation_history: list[Message],
is_multiturn: bool = False,
turn_number: int = 1,
agent_branch: Any | None = None,
agent_id: str | None = None,
agent_role: str | None = None,
response_model: type[BaseModel] | None = None,
) -> dict[str, Any]:
"""Async implementation of A2A delegation with multi-turn support.

Args:
endpoint: A2A agent endpoint URL
auth: Optional AuthScheme for authentication
timeout: Request timeout in seconds
task_description: Task to delegate
context: Optional context
context_id: Context ID for correlation
task_id: Specific task identifier
reference_task_ids: Related task IDs
metadata: Additional metadata
extensions: Protocol extensions
conversation_history: Previous Message objects
is_multiturn: Whether this is a multi-turn conversation
turn_number: Current turn number
agent_branch: Agent tree branch for logging
agent_id: Agent identifier for logging
agent_role: Agent role for logging
response_model: Optional Pydantic model for structured outputs

Returns:
Dictionary with status, result/error, and new history
"""
is_multiturn: bool,
turn_number: int,
agent_branch: Any | None,
agent_id: str | None,
agent_role: str | None,
response_model: type[BaseModel] | None,
updates: UpdateConfig | None,
) -> TaskStateResult:
"""Internal async implementation of A2A delegation."""
if auth:
auth_data = auth.model_dump_json(
exclude={
@@ -385,7 +463,7 @@ async def _execute_a2a_delegation_async(
else:
auth_hash = 0
_auth_store[auth_hash] = auth
agent_card = await _fetch_agent_card_async_cached(
agent_card = await _afetch_agent_card_cached(
endpoint=endpoint, auth_hash=auth_hash, timeout=timeout
)

@@ -458,201 +536,61 @@ async def _execute_a2a_delegation_async(
),
)

handler = get_handler(updates)
use_polling = isinstance(updates, PollingConfig)

handler_kwargs: dict[str, Any] = {
"turn_number": turn_number,
"is_multiturn": is_multiturn,
"agent_role": agent_role,
"context_id": context_id,
"task_id": task_id,
"endpoint": endpoint,
"agent_branch": agent_branch,
}

if isinstance(updates, PollingConfig):
handler_kwargs.update(
{
"polling_interval": updates.interval,
"polling_timeout": updates.timeout or float(timeout),
"history_length": updates.history_length,
"max_polls": updates.max_polls,
}
)
elif isinstance(updates, PushNotificationConfig):
handler_kwargs.update(
{
"config": updates,
"result_store": updates.result_store,
"polling_timeout": updates.timeout or float(timeout),
"polling_interval": updates.interval,
}
)

push_config_for_client = (
updates if isinstance(updates, PushNotificationConfig) else None
)

use_streaming = not use_polling and push_config_for_client is None
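# Dispatch sketch (config values are illustrative): the three UpdateConfig
# variants resolve to mutually exclusive client modes under the logic above.
def _example_update_modes() -> None:
    configs = [
        None,  # default: server-sent streaming
        PollingConfig(interval=2.0),
        PushNotificationConfig(url="https://crew.example.com/a2a/callback", token="t"),
    ]
    for cfg in configs:
        polling = isinstance(cfg, PollingConfig)
        push = isinstance(cfg, PushNotificationConfig)
        streaming = not polling and not push
        print(type(cfg).__name__, dict(streaming=streaming, polling=polling, push=push))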

async with _create_a2a_client(
agent_card=agent_card,
transport_protocol=transport_protocol,
timeout=timeout,
headers=headers,
streaming=True,
streaming=use_streaming,
auth=auth,
use_polling=use_polling,
push_notification_config=push_config_for_client,
) as client:
result_parts: list[str] = []
final_result: dict[str, Any] | None = None
event_stream = client.send_message(message)

try:
async for event in event_stream:
if isinstance(event, Message):
new_messages.append(event)
for part in event.parts:
if part.root.kind == "text":
text = part.root.text
result_parts.append(text)

elif isinstance(event, tuple):
a2a_task, update = event

if isinstance(update, TaskArtifactUpdateEvent):
artifact = update.artifact
result_parts.extend(
part.root.text
for part in artifact.parts
if part.root.kind == "text"
)

is_final_update = False
if isinstance(update, TaskStatusUpdateEvent):
is_final_update = update.final

if not is_final_update and a2a_task.status.state not in [
TaskState.completed,
TaskState.input_required,
TaskState.failed,
TaskState.rejected,
TaskState.auth_required,
TaskState.canceled,
]:
continue

if a2a_task.status.state == TaskState.completed:
extracted_parts = _extract_task_result_parts(a2a_task)
result_parts.extend(extracted_parts)
if a2a_task.history:
new_messages.extend(a2a_task.history)

response_text = " ".join(result_parts) if result_parts else ""
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="completed",
agent_role=agent_role,
),
)

final_result = {
"status": "completed",
"result": response_text,
"history": new_messages,
"agent_card": agent_card,
}
break

if a2a_task.status.state == TaskState.input_required:
if a2a_task.history:
new_messages.extend(a2a_task.history)

response_text = _extract_error_message(
a2a_task, "Additional input required"
)
if response_text and not a2a_task.history:
agent_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=response_text))],
context_id=a2a_task.context_id
if hasattr(a2a_task, "context_id")
else None,
task_id=a2a_task.task_id
if hasattr(a2a_task, "task_id")
else None,
)
new_messages.append(agent_message)
crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=response_text,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="input_required",
agent_role=agent_role,
),
)

final_result = {
"status": "input_required",
"error": response_text,
"history": new_messages,
"agent_card": agent_card,
}
break

if a2a_task.status.state in [TaskState.failed, TaskState.rejected]:
error_msg = _extract_error_message(
a2a_task, "Task failed without error message"
)
if a2a_task.history:
new_messages.extend(a2a_task.history)
final_result = {
"status": "failed",
"error": error_msg,
"history": new_messages,
}
break

if a2a_task.status.state == TaskState.auth_required:
error_msg = _extract_error_message(
a2a_task, "Authentication required"
)
final_result = {
"status": "auth_required",
"error": error_msg,
"history": new_messages,
}
break

if a2a_task.status.state == TaskState.canceled:
error_msg = _extract_error_message(
a2a_task, "Task was canceled"
)
final_result = {
"status": "canceled",
"error": error_msg,
"history": new_messages,
}
break
except Exception as e:
if isinstance(e, A2AClientHTTPError):
error_msg = f"HTTP Error {e.status_code}: {e!s}"

error_message = Message(
role=Role.agent,
message_id=str(uuid.uuid4()),
parts=[Part(root=TextPart(text=error_msg))],
context_id=context_id,
task_id=task_id,
)
new_messages.append(error_message)

crewai_event_bus.emit(
None,
A2AResponseReceivedEvent(
response=error_msg,
turn_number=turn_number,
is_multiturn=is_multiturn,
status="failed",
agent_role=agent_role,
),
)
return {
"status": "failed",
"error": error_msg,
"history": new_messages,
}

current_exception: Exception | BaseException | None = e
while current_exception:
if hasattr(current_exception, "response"):
response = current_exception.response
if hasattr(response, "text"):
break
if current_exception and hasattr(current_exception, "__cause__"):
current_exception = current_exception.__cause__
raise
finally:
if hasattr(event_stream, "aclose"):
await event_stream.aclose()

if final_result:
return final_result

return {
"status": "completed",
"result": " ".join(result_parts) if result_parts else "",
"history": new_messages,
}
return await handler.execute(
client=client,
message=message,
new_messages=new_messages,
agent_card=agent_card,
**handler_kwargs,
)
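# Handler contract sketch: get_handler() is assumed to return an object whose
# async execute() consumes client events and folds them into a
# TaskStateResult-shaped mapping, as the old inline streaming loop did. A
# hypothetical minimal shape:
class _ExampleUpdateHandler:
    async def execute(
        self, *, client: Any, message: Any, new_messages: list[Any], agent_card: Any, **kwargs: Any
    ) -> dict[str, Any]:
        # A real handler would iterate client.send_message(message) here and
        # map terminal TaskState values to the corresponding status strings.
        return {"status": "completed", "result": "", "history": new_messages, "agent_card": agent_card}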


@asynccontextmanager
@@ -663,6 +601,8 @@ async def _create_a2a_client(
headers: MutableMapping[str, str],
streaming: bool,
auth: AuthScheme | None = None,
use_polling: bool = False,
push_notification_config: PushNotificationConfig | None = None,
) -> AsyncIterator[Client]:
"""Create and configure an A2A client.

@@ -673,6 +613,8 @@ async def _create_a2a_client(
headers: HTTP headers (already with auth applied)
streaming: Enable streaming responses
auth: Optional AuthScheme for client configuration
use_polling: Enable polling mode
push_notification_config: Optional push notification config to include in requests

Yields:
Configured A2A client instance
@@ -685,11 +627,24 @@ async def _create_a2a_client(
if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
configure_auth_client(auth, httpx_client)

push_configs: list[A2APushNotificationConfig] = []
if push_notification_config is not None:
push_configs.append(
A2APushNotificationConfig(
url=str(push_notification_config.url),
id=push_notification_config.id,
token=push_notification_config.token,
authentication=push_notification_config.authentication,
)
)

config = ClientConfig(
httpx_client=httpx_client,
supported_transports=[str(transport_protocol.value)],
streaming=streaming,
streaming=streaming and not use_polling,
polling=use_polling,
accepted_output_modes=["application/json"],
push_notification_configs=push_configs,
)

factory = ClientFactory(config)
@@ -697,66 +652,6 @@ async def _create_a2a_client(
yield client


def _extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
"""Extract result parts from A2A task history and artifacts.

Args:
a2a_task: A2A Task object with history and artifacts

Returns:
List of result text parts
"""

result_parts: list[str] = []

if a2a_task.history:
for history_msg in reversed(a2a_task.history):
if history_msg.role == Role.agent:
result_parts.extend(
part.root.text
for part in history_msg.parts
if part.root.kind == "text"
)
break

if a2a_task.artifacts:
result_parts.extend(
part.root.text
for artifact in a2a_task.artifacts
for part in artifact.parts
if part.root.kind == "text"
)

return result_parts


def _extract_error_message(a2a_task: A2ATask, default: str) -> str:
"""Extract error message from A2A task.

Args:
a2a_task: A2A Task object
default: Default message if no error found

Returns:
Error message string
"""
if a2a_task.status and a2a_task.status.message:
msg = a2a_task.status.message
if msg:
for part in msg.parts:
if part.root.kind == "text":
return str(part.root.text)
return str(msg)

if a2a_task.history:
for history_msg in reversed(a2a_task.history):
for part in history_msg.parts:
if part.root.kind == "text":
return str(part.root.text)

return default
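# Worked example (hypothetical message): both helpers above keep only parts
# whose kind is "text", so a message built like this yields its text payload.
def _example_text_parts() -> list[str]:
    msg = Message(
        role=Role.agent,
        message_id=str(uuid.uuid4()),
        parts=[Part(root=TextPart(text="Final answer: 42"))],
    )
    return [part.root.text for part in msg.parts if part.root.kind == "text"]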


def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel]:
"""Create a dynamic AgentResponse model with Literal types for agent IDs.

@@ -788,7 +683,7 @@ def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel]:
is_a2a=(
bool,
Field(
description="Set to true to continue the conversation by sending this message to the A2A agent and awaiting their response. Set to false ONLY when you are completely done and providing your final answer (not when asking questions)."
description="Set to false when the remote agent has answered your question - extract their answer and return it as your final message. Set to true ONLY if you need to ask a NEW, DIFFERENT question. NEVER repeat the same request - if the conversation history shows the agent already answered, set is_a2a=false immediately."
),
),
__base__=BaseModel,
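# Construction sketch: the field names below come from this diff, but the
# create_model call itself is an assumption about the elided body of
# create_agent_response_model.
def _example_agent_response_model() -> type[BaseModel]:
    from typing import Literal

    from pydantic import Field, create_model

    ids = ("https://a.example.com", "https://b.example.com")  # hypothetical
    return create_model(
        "ExampleAgentResponse",
        a2a_ids=(list[Literal[ids]], Field(default_factory=list)),
        message=(str, Field(description="Message to send to the A2A agent")),
        is_a2a=(bool, Field(description="Whether to delegate this turn")),
        __base__=BaseModel,
    )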

@@ -5,25 +5,30 @@ Wraps agent classes with A2A delegation capabilities.

from __future__ import annotations

from collections.abc import Callable
import asyncio
from collections.abc import Callable, Coroutine
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
from types import MethodType
from typing import TYPE_CHECKING, Any, cast
from typing import TYPE_CHECKING, Any

from a2a.types import Role
from a2a.types import Role, TaskState
from pydantic import BaseModel, ValidationError

from crewai.a2a.config import A2AConfig
from crewai.a2a.extensions.base import ExtensionRegistry
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.templates import (
AVAILABLE_AGENTS_TEMPLATE,
CONVERSATION_TURN_INFO_TEMPLATE,
PREVIOUS_A2A_CONVERSATION_TEMPLATE,
REMOTE_AGENT_COMPLETED_NOTICE,
UNAVAILABLE_AGENTS_NOTICE_TEMPLATE,
)
from crewai.a2a.types import AgentResponseProtocol
from crewai.a2a.utils import (
aexecute_a2a_delegation,
afetch_agent_card,
execute_a2a_delegation,
fetch_agent_card,
get_a2a_agents_and_response_model,
@@ -46,15 +51,15 @@ if TYPE_CHECKING:
def wrap_agent_with_a2a_instance(
agent: Agent, extension_registry: ExtensionRegistry | None = None
) -> None:
"""Wrap an agent instance's execute_task method with A2A support.
"""Wrap an agent instance's execute_task and aexecute_task methods with A2A support.

This function modifies the agent instance by wrapping its execute_task
method to add A2A delegation capabilities. Should only be called when
the agent has a2a configuration set.
and aexecute_task methods to add A2A delegation capabilities. Should only
be called when the agent has a2a configuration set.

Args:
agent: The agent instance to wrap
extension_registry: Optional registry of A2A extensions for injecting tools and custom logic
agent: The agent instance to wrap.
extension_registry: Optional registry of A2A extensions.
"""
if extension_registry is None:
extension_registry = ExtensionRegistry()
@@ -62,6 +67,7 @@ def wrap_agent_with_a2a_instance(
extension_registry.inject_all_tools(agent)

original_execute_task = agent.execute_task.__func__ # type: ignore[attr-defined]
original_aexecute_task = agent.aexecute_task.__func__ # type: ignore[attr-defined]

@wraps(original_execute_task)
def execute_task_with_a2a(
@@ -70,17 +76,7 @@ def wrap_agent_with_a2a_instance(
context: str | None = None,
tools: list[BaseTool] | None = None,
) -> str:
"""Execute task with A2A delegation support.

Args:
self: The agent instance
task: The task to execute
context: Optional context for task execution
tools: Optional tools available to the agent

Returns:
Task execution result
"""
"""Execute task with A2A delegation support (sync)."""
if not self.a2a:
return original_execute_task(self, task, context, tools) # type: ignore[no-any-return]

@@ -97,7 +93,34 @@ def wrap_agent_with_a2a_instance(
extension_registry=extension_registry,
)

@wraps(original_aexecute_task)
async def aexecute_task_with_a2a(
self: Agent,
task: Task,
context: str | None = None,
tools: list[BaseTool] | None = None,
) -> str:
"""Execute task with A2A delegation support (async)."""
if not self.a2a:
return await original_aexecute_task(self, task, context, tools) # type: ignore[no-any-return]

a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)

return await _aexecute_task_with_a2a(
self=self,
a2a_agents=a2a_agents,
original_fn=original_aexecute_task,
task=task,
agent_response_model=agent_response_model,
context=context,
tools=tools,
extension_registry=extension_registry,
)

object.__setattr__(agent, "execute_task", MethodType(execute_task_with_a2a, agent))
object.__setattr__(
agent, "aexecute_task", MethodType(aexecute_task_with_a2a, agent)
)
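# Pattern sketch: binding a wrapper to a single instance. object.__setattr__
# is used because Agent is assumed to be a pydantic model that intercepts
# plain attribute assignment; MethodType makes the wrapper a bound method so
# `self` is supplied automatically. `obj.run` is a hypothetical method name.
def _example_instance_wrap(obj: Any) -> None:
    original = obj.run.__func__

    @wraps(original)
    def run_wrapped(self: Any, *args: Any, **kwargs: Any) -> Any:
        # Pre/post hooks around the original behavior would go here.
        return original(self, *args, **kwargs)

    object.__setattr__(obj, "run", MethodType(run_wrapped, obj))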


def _fetch_card_from_config(
@@ -255,6 +278,7 @@ def _augment_prompt_with_a2a(
max_turns: int | None = None,
failed_agents: dict[str, str] | None = None,
extension_registry: ExtensionRegistry | None = None,
remote_task_completed: bool = False,
) -> tuple[str, bool]:
"""Add A2A delegation instructions to prompt.

@@ -327,12 +351,15 @@ def _augment_prompt_with_a2a(
warning=warning,
)

completion_notice = ""
if remote_task_completed and conversation_history:
completion_notice = REMOTE_AGENT_COMPLETED_NOTICE

augmented_prompt = f"""{task_description}

IMPORTANT: You have the ability to delegate this task to remote A2A agents.
{agents_text}
{history_text}{turn_info}

{history_text}{turn_info}{completion_notice}

"""

@@ -346,16 +373,8 @@ IMPORTANT: You have the ability to delegate this task to remote A2A agents.

def _parse_agent_response(
raw_result: str | dict[str, Any], agent_response_model: type[BaseModel]
) -> BaseModel | str:
"""Parse LLM output as AgentResponse or return raw agent response.

Args:
raw_result: Raw output from LLM
agent_response_model: The agent response model

Returns:
Parsed AgentResponse or string
"""
) -> BaseModel | str | dict[str, Any]:
"""Parse LLM output as AgentResponse or return raw agent response."""
if agent_response_model:
try:
if isinstance(raw_result, str):
@@ -363,71 +382,70 @@ def _parse_agent_response(
if isinstance(raw_result, dict):
return agent_response_model.model_validate(raw_result)
except ValidationError:
return cast(str, raw_result)
return cast(str, raw_result)
return raw_result
return raw_result
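# Behavior sketch (hypothetical model; assumes the elided string branch calls
# model_validate_json): valid JSON matching the model parses, and anything
# that raises ValidationError falls through unchanged.
def _example_parse_fallback() -> None:
    class _Resp(BaseModel):
        message: str
        is_a2a: bool

    parsed = _parse_agent_response('{"message": "hi", "is_a2a": false}', _Resp)
    assert isinstance(parsed, BaseModel)
    passthrough = _parse_agent_response("not json at all", _Resp)
    assert passthrough == "not json at all"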


def _handle_agent_response_and_continue(
self: Agent,
a2a_result: dict[str, Any],
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
a2a_agents: list[A2AConfig],
original_task_description: str,
def _handle_max_turns_exceeded(
conversation_history: list[Message],
turn_num: int,
max_turns: int,
task: Task,
original_fn: Callable[..., str],
context: str | None,
tools: list[BaseTool] | None,
agent_response_model: type[BaseModel],
) -> tuple[str | None, str | None]:
"""Handle A2A result and get CrewAI agent's response.
) -> str:
"""Handle the case when max turns is exceeded.

Args:
self: The agent instance
a2a_result: Result from A2A delegation
agent_id: ID of the A2A agent
agent_cards: Pre-fetched agent cards
a2a_agents: List of A2A configurations
original_task_description: Original task description
conversation_history: Conversation history
turn_num: Current turn number
max_turns: Maximum turns allowed
task: The task being executed
original_fn: Original execute_task method
context: Optional context
tools: Optional tools
agent_response_model: Response model for parsing
Shared logic for both sync and async delegation.

Returns:
Tuple of (final_result, current_request) where:
- final_result is not None if conversation should end
- current_request is the next message to send if continuing
Final message if found in history.

Raises:
Exception: If no final message found and max turns exceeded.
"""
agent_cards_dict = agent_cards or {}
if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
agent_cards_dict[agent_id] = a2a_result["agent_card"]
if conversation_history:
for msg in reversed(conversation_history):
if msg.role == Role.agent:
text_parts = [
part.root.text for part in msg.parts if part.root.kind == "text"
]
final_message = (
" ".join(text_parts) if text_parts else "Conversation completed"
)
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=final_message,
error=None,
total_turns=max_turns,
),
)
return final_message

task.description, disable_structured_output = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
agent_cards=agent_cards_dict,
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="failed",
final_result=None,
error=f"Conversation exceeded maximum turns ({max_turns})",
total_turns=max_turns,
),
)
raise Exception(f"A2A conversation exceeded maximum turns ({max_turns})")

original_response_model = task.response_model
if disable_structured_output:
task.response_model = None

raw_result = original_fn(self, task, context, tools)
def _process_response_result(
raw_result: str,
disable_structured_output: bool,
turn_num: int,
agent_role: str,
agent_response_model: type[BaseModel],
) -> tuple[str | None, str | None]:
"""Process LLM response and determine next action.

if disable_structured_output:
task.response_model = original_response_model
Shared logic for both sync and async handlers.

Returns:
Tuple of (final_result, next_request).
"""
if disable_structured_output:
final_turn_number = turn_num + 1
result_text = str(raw_result)
@@ -437,7 +455,7 @@ def _handle_agent_response_and_continue(
message=result_text,
turn_number=final_turn_number,
is_multiturn=True,
agent_role=self.role,
agent_role=agent_role,
),
)
crewai_event_bus.emit(
@@ -466,7 +484,7 @@ def _handle_agent_response_and_continue(
message=str(llm_response.message),
turn_number=final_turn_number,
is_multiturn=True,
agent_role=self.role,
agent_role=agent_role,
),
)
crewai_event_bus.emit(
@@ -484,6 +502,200 @@ def _handle_agent_response_and_continue(
return str(raw_result), None


def _prepare_agent_cards_dict(
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
) -> dict[str, AgentCard]:
"""Prepare agent cards dictionary from result and existing cards.

Shared logic for both sync and async response handlers.
"""
agent_cards_dict = agent_cards or {}
if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
agent_cards_dict[agent_id] = a2a_result["agent_card"]
return agent_cards_dict


def _prepare_delegation_context(
self: Agent,
agent_response: AgentResponseProtocol,
task: Task,
original_task_description: str | None,
) -> tuple[
list[A2AConfig],
type[BaseModel],
str,
str,
A2AConfig,
str | None,
str | None,
dict[str, Any] | None,
dict[str, Any] | None,
list[str],
str,
int,
]:
"""Prepare delegation context from agent response and task.

Shared logic for both sync and async delegation.

Returns:
Tuple containing all the context values needed for delegation.
"""
a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
agent_ids = tuple(config.endpoint for config in a2a_agents)
current_request = str(agent_response.message)

if hasattr(agent_response, "a2a_ids") and agent_response.a2a_ids:
agent_id = agent_response.a2a_ids[0]
else:
agent_id = agent_ids[0] if agent_ids else ""

if agent_id and agent_id not in agent_ids:
raise ValueError(
f"Unknown A2A agent ID(s): {agent_response.a2a_ids} not in {agent_ids}"
)

agent_config = next(filter(lambda x: x.endpoint == agent_id, a2a_agents))
task_config = task.config or {}
context_id = task_config.get("context_id")
task_id_config = task_config.get("task_id")
metadata = task_config.get("metadata")
extensions = task_config.get("extensions")
reference_task_ids = task_config.get("reference_task_ids", [])

if original_task_description is None:
original_task_description = task.description

max_turns = agent_config.max_turns

return (
a2a_agents,
agent_response_model,
current_request,
agent_id,
agent_config,
context_id,
task_id_config,
metadata,
extensions,
reference_task_ids,
original_task_description,
max_turns,
)


def _handle_task_completion(
a2a_result: TaskStateResult,
task: Task,
task_id_config: str | None,
reference_task_ids: list[str],
agent_config: A2AConfig,
turn_num: int,
) -> tuple[str | None, str | None, list[str]]:
"""Handle task completion state including reference task updates.

Shared logic for both sync and async delegation.

Returns:
Tuple of (result_if_trusted, updated_task_id, updated_reference_task_ids).
"""
if a2a_result["status"] == TaskState.completed:
if task_id_config is not None and task_id_config not in reference_task_ids:
reference_task_ids.append(task_id_config)
if task.config is None:
task.config = {}
task.config["reference_task_ids"] = reference_task_ids
task_id_config = None

if agent_config.trust_remote_completion_status:
result_text = a2a_result.get("result", "")
final_turn_number = turn_num + 1
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=result_text,
error=None,
total_turns=final_turn_number,
),
)
return str(result_text), task_id_config, reference_task_ids

return None, task_id_config, reference_task_ids
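# Accumulation sketch (illustrative ids): once a delegated task completes,
# its id is promoted into reference_task_ids and the live task_id is
# cleared, so the next turn opens a fresh task that still references the
# finished one.
def _example_reference_ids() -> None:
    reference_task_ids = ["task-001"]
    task_id: str | None = "task-002"
    if task_id is not None and task_id not in reference_task_ids:
        reference_task_ids.append(task_id)
    task_id = None
    assert reference_task_ids == ["task-001", "task-002"] and task_id is None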


def _handle_agent_response_and_continue(
self: Agent,
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
a2a_agents: list[A2AConfig],
original_task_description: str,
conversation_history: list[Message],
turn_num: int,
max_turns: int,
task: Task,
original_fn: Callable[..., str],
context: str | None,
tools: list[BaseTool] | None,
agent_response_model: type[BaseModel],
remote_task_completed: bool = False,
) -> tuple[str | None, str | None]:
"""Handle A2A result and get CrewAI agent's response.

Args:
self: The agent instance
a2a_result: Result from A2A delegation
agent_id: ID of the A2A agent
agent_cards: Pre-fetched agent cards
a2a_agents: List of A2A configurations
original_task_description: Original task description
conversation_history: Conversation history
turn_num: Current turn number
max_turns: Maximum turns allowed
task: The task being executed
original_fn: Original execute_task method
context: Optional context
tools: Optional tools
agent_response_model: Response model for parsing

Returns:
Tuple of (final_result, current_request) where:
- final_result is not None if conversation should end
- current_request is the next message to send if continuing
"""
agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)

task.description, disable_structured_output = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
agent_cards=agent_cards_dict,
remote_task_completed=remote_task_completed,
)

original_response_model = task.response_model
if disable_structured_output:
task.response_model = None

raw_result = original_fn(self, task, context, tools)

if disable_structured_output:
task.response_model = original_response_model

return _process_response_result(
raw_result=raw_result,
disable_structured_output=disable_structured_output,
turn_num=turn_num,
agent_role=self.role,
agent_response_model=agent_response_model,
)


def _delegate_to_a2a(
self: Agent,
agent_response: AgentResponseProtocol,
@@ -514,34 +726,24 @@ def _delegate_to_a2a(
Raises:
ImportError: If a2a-sdk is not installed
"""
a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
agent_ids = tuple(config.endpoint for config in a2a_agents)
current_request = str(agent_response.message)

if hasattr(agent_response, "a2a_ids") and agent_response.a2a_ids:
agent_id = agent_response.a2a_ids[0]
else:
agent_id = agent_ids[0] if agent_ids else ""

if agent_id and agent_id not in agent_ids:
raise ValueError(
f"Unknown A2A agent ID(s): {agent_response.a2a_ids} not in {agent_ids}"
)

agent_config = next(filter(lambda x: x.endpoint == agent_id, a2a_agents))
task_config = task.config or {}
context_id = task_config.get("context_id")
task_id_config = task_config.get("task_id")
metadata = task_config.get("metadata")
extensions = task_config.get("extensions")

reference_task_ids = task_config.get("reference_task_ids", [])

if original_task_description is None:
original_task_description = task.description
(
a2a_agents,
agent_response_model,
current_request,
agent_id,
agent_config,
context_id,
task_id_config,
metadata,
extensions,
reference_task_ids,
original_task_description,
max_turns,
) = _prepare_delegation_context(
self, agent_response, task, original_task_description
)

conversation_history: list[Message] = []
max_turns = agent_config.max_turns

try:
for turn_num in range(max_turns):
@@ -568,6 +770,7 @@ def _delegate_to_a2a(
agent_branch=agent_branch,
response_model=agent_config.response_model,
turn_number=turn_num + 1,
updates=agent_config.updates,
)

conversation_history = a2a_result.get("history", [])
@@ -579,32 +782,19 @@ def _delegate_to_a2a(
if latest_message.context_id is not None:
context_id = latest_message.context_id

if a2a_result["status"] in ["completed", "input_required"]:
if (
a2a_result["status"] == "completed"
and agent_config.trust_remote_completion_status
):
if (
task_id_config is not None
and task_id_config not in reference_task_ids
):
reference_task_ids.append(task_id_config)
if task.config is None:
task.config = {}
task.config["reference_task_ids"] = reference_task_ids

result_text = a2a_result.get("result", "")
final_turn_number = turn_num + 1
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=result_text,
error=None,
total_turns=final_turn_number,
),
if a2a_result["status"] in [TaskState.completed, TaskState.input_required]:
trusted_result, task_id_config, reference_task_ids = (
_handle_task_completion(
a2a_result,
task,
task_id_config,
reference_task_ids,
agent_config,
turn_num,
)
return cast(str, result_text)
)
if trusted_result is not None:
return trusted_result

final_result, next_request = _handle_agent_response_and_continue(
self=self,
@@ -621,6 +811,7 @@ def _delegate_to_a2a(
context=context,
tools=tools,
agent_response_model=agent_response_model,
remote_task_completed=(a2a_result["status"] == TaskState.completed),
)

if final_result is not None:
@@ -648,6 +839,321 @@ def _delegate_to_a2a(
context=context,
tools=tools,
agent_response_model=agent_response_model,
remote_task_completed=False,
)

if final_result is not None:
return final_result

if next_request is not None:
current_request = next_request
continue

crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="failed",
final_result=None,
error=error_msg,
total_turns=turn_num + 1,
),
)
return f"A2A delegation failed: {error_msg}"

return _handle_max_turns_exceeded(conversation_history, max_turns)

finally:
task.description = original_task_description


async def _afetch_card_from_config(
config: A2AConfig,
) -> tuple[A2AConfig, AgentCard | Exception]:
"""Fetch agent card from A2A config asynchronously."""
try:
card = await afetch_agent_card(
endpoint=config.endpoint,
auth=config.auth,
timeout=config.timeout,
)
return config, card
except Exception as e:
return config, e


async def _afetch_agent_cards_concurrently(
a2a_agents: list[A2AConfig],
) -> tuple[dict[str, AgentCard], dict[str, str]]:
"""Fetch agent cards concurrently for multiple A2A agents using asyncio."""
agent_cards: dict[str, AgentCard] = {}
failed_agents: dict[str, str] = {}

tasks = [_afetch_card_from_config(config) for config in a2a_agents]
results = await asyncio.gather(*tasks)

for config, result in results:
if isinstance(result, Exception):
if config.fail_fast:
raise RuntimeError(
f"Failed to fetch agent card from {config.endpoint}. "
f"Ensure the A2A agent is running and accessible. Error: {result}"
) from result
failed_agents[config.endpoint] = str(result)
else:
agent_cards[config.endpoint] = result

return agent_cards, failed_agents
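# Usage sketch (placeholder endpoints): failures are captured per endpoint
# unless a config sets fail_fast, in which case the first failure raises for
# the whole batch.
async def _example_fetch_cards() -> None:
    configs = [
        A2AConfig(endpoint="https://a.example.com/.well-known/agent-card.json"),
        A2AConfig(endpoint="https://b.example.com/.well-known/agent-card.json"),
    ]
    cards, failed = await _afetch_agent_cards_concurrently(configs)
    print(f"{len(cards)} reachable, {len(failed)} unavailable")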


async def _aexecute_task_with_a2a(
self: Agent,
a2a_agents: list[A2AConfig],
original_fn: Callable[..., Coroutine[Any, Any, str]],
task: Task,
agent_response_model: type[BaseModel],
context: str | None,
tools: list[BaseTool] | None,
extension_registry: ExtensionRegistry,
) -> str:
"""Async version of _execute_task_with_a2a."""
original_description: str = task.description
original_output_pydantic = task.output_pydantic
original_response_model = task.response_model

agent_cards, failed_agents = await _afetch_agent_cards_concurrently(a2a_agents)

if not agent_cards and a2a_agents and failed_agents:
unavailable_agents_text = ""
for endpoint, error in failed_agents.items():
unavailable_agents_text += f" - {endpoint}: {error}\n"

notice = UNAVAILABLE_AGENTS_NOTICE_TEMPLATE.substitute(
unavailable_agents=unavailable_agents_text
)
task.description = f"{original_description}{notice}"

try:
return await original_fn(self, task, context, tools)
finally:
task.description = original_description

task.description, _ = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_description,
agent_cards=agent_cards,
failed_agents=failed_agents,
extension_registry=extension_registry,
)
task.response_model = agent_response_model

try:
raw_result = await original_fn(self, task, context, tools)
agent_response = _parse_agent_response(
raw_result=raw_result, agent_response_model=agent_response_model
)

if extension_registry and isinstance(agent_response, BaseModel):
agent_response = extension_registry.process_response_with_all(
agent_response, {}
)

if isinstance(agent_response, BaseModel) and isinstance(
agent_response, AgentResponseProtocol
):
if agent_response.is_a2a:
return await _adelegate_to_a2a(
self,
agent_response=agent_response,
task=task,
original_fn=original_fn,
context=context,
tools=tools,
agent_cards=agent_cards,
original_task_description=original_description,
extension_registry=extension_registry,
)
return str(agent_response.message)

return raw_result
finally:
task.description = original_description
task.output_pydantic = original_output_pydantic
task.response_model = original_response_model


async def _ahandle_agent_response_and_continue(
self: Agent,
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: dict[str, AgentCard] | None,
a2a_agents: list[A2AConfig],
original_task_description: str,
conversation_history: list[Message],
turn_num: int,
max_turns: int,
task: Task,
original_fn: Callable[..., Coroutine[Any, Any, str]],
context: str | None,
tools: list[BaseTool] | None,
agent_response_model: type[BaseModel],
remote_task_completed: bool = False,
) -> tuple[str | None, str | None]:
"""Async version of _handle_agent_response_and_continue."""
agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)

task.description, disable_structured_output = _augment_prompt_with_a2a(
a2a_agents=a2a_agents,
task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
agent_cards=agent_cards_dict,
remote_task_completed=remote_task_completed,
)

original_response_model = task.response_model
if disable_structured_output:
task.response_model = None

raw_result = await original_fn(self, task, context, tools)

if disable_structured_output:
task.response_model = original_response_model

return _process_response_result(
raw_result=raw_result,
disable_structured_output=disable_structured_output,
turn_num=turn_num,
agent_role=self.role,
agent_response_model=agent_response_model,
)


async def _adelegate_to_a2a(
self: Agent,
agent_response: AgentResponseProtocol,
task: Task,
original_fn: Callable[..., Coroutine[Any, Any, str]],
context: str | None,
tools: list[BaseTool] | None,
agent_cards: dict[str, AgentCard] | None = None,
original_task_description: str | None = None,
extension_registry: ExtensionRegistry | None = None,
) -> str:
"""Async version of _delegate_to_a2a."""
(
a2a_agents,
agent_response_model,
current_request,
agent_id,
agent_config,
context_id,
task_id_config,
metadata,
extensions,
reference_task_ids,
original_task_description,
max_turns,
) = _prepare_delegation_context(
self, agent_response, task, original_task_description
)

conversation_history: list[Message] = []

try:
for turn_num in range(max_turns):
console_formatter = getattr(crewai_event_bus, "_console", None)
agent_branch = None
if console_formatter:
agent_branch = getattr(
console_formatter, "current_agent_branch", None
) or getattr(console_formatter, "current_task_branch", None)

a2a_result = await aexecute_a2a_delegation(
endpoint=agent_config.endpoint,
auth=agent_config.auth,
timeout=agent_config.timeout,
task_description=current_request,
context_id=context_id,
task_id=task_id_config,
reference_task_ids=reference_task_ids,
metadata=metadata,
extensions=extensions,
conversation_history=conversation_history,
agent_id=agent_id,
agent_role=Role.user,
agent_branch=agent_branch,
response_model=agent_config.response_model,
turn_number=turn_num + 1,
updates=agent_config.updates,
)

conversation_history = a2a_result.get("history", [])

if conversation_history:
latest_message = conversation_history[-1]
if latest_message.task_id is not None:
task_id_config = latest_message.task_id
if latest_message.context_id is not None:
context_id = latest_message.context_id

if a2a_result["status"] in [TaskState.completed, TaskState.input_required]:
trusted_result, task_id_config, reference_task_ids = (
_handle_task_completion(
a2a_result,
task,
task_id_config,
reference_task_ids,
agent_config,
turn_num,
)
)
if trusted_result is not None:
return trusted_result

final_result, next_request = await _ahandle_agent_response_and_continue(
self=self,
a2a_result=a2a_result,
agent_id=agent_id,
agent_cards=agent_cards,
a2a_agents=a2a_agents,
original_task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
task=task,
original_fn=original_fn,
context=context,
tools=tools,
agent_response_model=agent_response_model,
remote_task_completed=(a2a_result["status"] == TaskState.completed),
)

if final_result is not None:
return final_result

if next_request is not None:
current_request = next_request

continue

error_msg = a2a_result.get("error", "Unknown error")

final_result, next_request = await _ahandle_agent_response_and_continue(
self=self,
a2a_result=a2a_result,
agent_id=agent_id,
agent_cards=agent_cards,
a2a_agents=a2a_agents,
original_task_description=original_task_description,
conversation_history=conversation_history,
turn_num=turn_num,
max_turns=max_turns,
task=task,
original_fn=original_fn,
context=context,
tools=tools,
agent_response_model=agent_response_model,
)

if final_result is not None:
@@ -668,36 +1174,7 @@ def _delegate_to_a2a(
)
return f"A2A delegation failed: {error_msg}"

if conversation_history:
for msg in reversed(conversation_history):
if msg.role == Role.agent:
text_parts = [
part.root.text for part in msg.parts if part.root.kind == "text"
]
final_message = (
" ".join(text_parts) if text_parts else "Conversation completed"
)
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=final_message,
error=None,
total_turns=max_turns,
),
)
return final_message

crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="failed",
final_result=None,
error=f"Conversation exceeded maximum turns ({max_turns})",
total_turns=max_turns,
),
)
raise Exception(f"A2A conversation exceeded maximum turns ({max_turns})")
return _handle_max_turns_exceeded(conversation_history, max_turns)

finally:
task.description = original_task_description

@@ -10,7 +10,7 @@ from collections.abc import Callable
import logging
from typing import TYPE_CHECKING, Any, Literal, cast

from pydantic import BaseModel, GetCoreSchemaHandler
from pydantic import BaseModel, GetCoreSchemaHandler, ValidationError
from pydantic_core import CoreSchema, core_schema

from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
@@ -244,7 +244,20 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
response_model=self.response_model,
executor_context=self,
)
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
if self.response_model is not None:
try:
self.response_model.model_validate_json(answer)
formatted_answer = AgentFinish(
thought="",
output=answer,
text=answer,
)
except ValidationError:
formatted_answer = process_llm_response(
answer, self.use_stop_words
) # type: ignore[assignment]
else:
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
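# Gate sketch: with a response_model configured, a raw answer that validates
# as that model's JSON is accepted verbatim as the final AgentFinish and
# skips ReAct-style parsing; anything else falls back to
# process_llm_response. E.g. (hypothetical model requiring {"message": str}):
# '{"message": "done"}' becomes an AgentFinish, while "Thought: ..." is
# parsed as a normal agent step.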
|
||||
|
||||
if isinstance(formatted_answer, AgentAction):
|
||||
# Extract agent fingerprint if available
|
||||
@@ -278,7 +291,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
self._invoke_step_callback(formatted_answer) # type: ignore[arg-type]
|
||||
self._append_message(formatted_answer.text) # type: ignore[union-attr,attr-defined]
|
||||
self._append_message(formatted_answer.text) # type: ignore[union-attr]
|
||||
|
||||
except OutputParserError as e:
|
||||
formatted_answer = handle_output_parser_exception( # type: ignore[assignment]
|
||||
@@ -398,7 +411,21 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
response_model=self.response_model,
|
||||
executor_context=self,
|
||||
)
|
||||
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
|
||||
|
||||
if self.response_model is not None:
|
||||
try:
|
||||
self.response_model.model_validate_json(answer)
|
||||
formatted_answer = AgentFinish(
|
||||
thought="",
|
||||
output=answer,
|
||||
text=answer,
|
||||
)
|
||||
except ValidationError:
|
||||
formatted_answer = process_llm_response(
|
||||
answer, self.use_stop_words
|
||||
) # type: ignore[assignment]
|
||||
else:
|
||||
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
|
||||
|
||||
if isinstance(formatted_answer, AgentAction):
|
||||
fingerprint_context = {}
|
||||
@@ -431,7 +458,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
self._invoke_step_callback(formatted_answer) # type: ignore[arg-type]
|
||||
self._append_message(formatted_answer.text) # type: ignore[union-attr,attr-defined]
|
||||
self._append_message(formatted_answer.text) # type: ignore[union-attr]
|
||||
|
||||
except OutputParserError as e:
|
||||
formatted_answer = handle_output_parser_exception( # type: ignore[assignment]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.7.2"
|
||||
"crewai[tools]==1.8.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.7.2"
|
||||
"crewai[tools]==1.8.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -10,7 +10,7 @@ This module provides the event infrastructure that allows users to:

from __future__ import annotations

from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Any

from crewai.events.base_event_listener import BaseEventListener
from crewai.events.depends import Depends
@@ -34,6 +34,8 @@ from crewai.events.types.flow_events import (
FlowFinishedEvent,
FlowPlotEvent,
FlowStartedEvent,
HumanFeedbackReceivedEvent,
HumanFeedbackRequestedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
@@ -145,6 +147,8 @@ __all__ = [
"FlowFinishedEvent",
"FlowPlotEvent",
"FlowStartedEvent",
"HumanFeedbackReceivedEvent",
"HumanFeedbackRequestedEvent",
"KnowledgeQueryCompletedEvent",
"KnowledgeQueryFailedEvent",
"KnowledgeQueryStartedEvent",
@@ -205,7 +209,7 @@ _AGENT_EVENT_MAPPING = {
}


def __getattr__(name: str):
def __getattr__(name: str) -> Any:
"""Lazy import for agent events to avoid circular imports."""
if name in _AGENT_EVENT_MAPPING:
import importlib
@@ -13,6 +13,8 @@ from crewai.events.types.a2a_events import (
A2ADelegationCompletedEvent,
A2ADelegationStartedEvent,
A2AMessageSentEvent,
A2APollingStartedEvent,
A2APollingStatusEvent,
A2AResponseReceivedEvent,
)
from crewai.events.types.agent_events import (
@@ -37,6 +39,8 @@ from crewai.events.types.flow_events import (
FlowFinishedEvent,
FlowPausedEvent,
FlowStartedEvent,
HumanFeedbackReceivedEvent,
HumanFeedbackRequestedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionPausedEvent,
@@ -67,7 +71,6 @@ from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
@@ -329,6 +332,33 @@ class EventListener(BaseEventListener):
"paused",
)

# ----------- HUMAN FEEDBACK EVENTS -----------
@crewai_event_bus.on(HumanFeedbackRequestedEvent)
def on_human_feedback_requested(
_: Any, event: HumanFeedbackRequestedEvent
) -> None:
"""Handle human feedback requested event."""
has_routing = event.emit is not None and len(event.emit) > 0
self._telemetry.human_feedback_span(
event_type="requested",
has_routing=has_routing,
num_outcomes=len(event.emit) if event.emit else 0,
)

@crewai_event_bus.on(HumanFeedbackReceivedEvent)
def on_human_feedback_received(
_: Any, event: HumanFeedbackReceivedEvent
) -> None:
"""Handle human feedback received event."""
has_routing = event.outcome is not None
self._telemetry.human_feedback_span(
event_type="received",
has_routing=has_routing,
num_outcomes=0,
feedback_provided=bool(event.feedback and event.feedback.strip()),
outcome=event.outcome,
)

# ----------- TOOL USAGE EVENTS -----------
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source: Any, event: ToolUsageStartedEvent) -> None:
@@ -580,6 +610,23 @@ class EventListener(BaseEventListener):
event.total_turns,
)

@crewai_event_bus.on(A2APollingStartedEvent)
def on_a2a_polling_started(_: Any, event: A2APollingStartedEvent) -> None:
self.formatter.handle_a2a_polling_started(
event.task_id,
event.polling_interval,
event.endpoint,
)

@crewai_event_bus.on(A2APollingStatusEvent)
def on_a2a_polling_status(_: Any, event: A2APollingStatusEvent) -> None:
self.formatter.handle_a2a_polling_status(
event.task_id,
event.state,
event.elapsed_seconds,
event.poll_count,
)

# ----------- MCP EVENTS -----------

@crewai_event_bus.on(MCPConnectionStartedEvent)
@@ -15,7 +15,7 @@ class A2AEventBase(BaseEvent):
from_task: Any | None = None
from_agent: Any | None = None

def __init__(self, **data):
def __init__(self, **data: Any) -> None:
"""Initialize A2A event, extracting task and agent metadata."""
if data.get("from_task"):
task = data["from_task"]
@@ -139,3 +139,74 @@ class A2AConversationCompletedEvent(A2AEventBase):
final_result: str | None = None
error: str | None = None
total_turns: int


class A2APollingStartedEvent(A2AEventBase):
"""Event emitted when polling mode begins for A2A delegation.

Attributes:
task_id: A2A task ID being polled
polling_interval: Seconds between poll attempts
endpoint: A2A agent endpoint URL
"""

type: str = "a2a_polling_started"
task_id: str
polling_interval: float
endpoint: str


class A2APollingStatusEvent(A2AEventBase):
"""Event emitted on each polling iteration.

Attributes:
task_id: A2A task ID being polled
state: Current task state from remote agent
elapsed_seconds: Time since polling started
poll_count: Number of polls completed
"""

type: str = "a2a_polling_status"
task_id: str
state: str
elapsed_seconds: float
poll_count: int


class A2APushNotificationRegisteredEvent(A2AEventBase):
"""Event emitted when push notification callback is registered.

Attributes:
task_id: A2A task ID for which callback is registered
callback_url: URL where agent will send push notifications
"""

type: str = "a2a_push_notification_registered"
task_id: str
callback_url: str


class A2APushNotificationReceivedEvent(A2AEventBase):
"""Event emitted when a push notification is received.

Attributes:
task_id: A2A task ID from the notification
state: Current task state from the notification
"""

type: str = "a2a_push_notification_received"
task_id: str
state: str


class A2APushNotificationTimeoutEvent(A2AEventBase):
"""Event emitted when push notification wait times out.

Attributes:
task_id: A2A task ID that timed out
timeout_seconds: Timeout duration in seconds
"""

type: str = "a2a_push_notification_timeout"
task_id: str
timeout_seconds: float
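Since these new polling and push-notification events all extend `A2AEventBase`, they can be observed like any other crewAI event. A minimal sketch, assuming the import paths shown in the diffs above; the listener class and print format are illustrative:

```python
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.types.a2a_events import (
    A2APollingStatusEvent,
    A2APushNotificationReceivedEvent,
)


class A2AUpdateLogger(BaseEventListener):
    """Log each poll iteration and each push callback as it arrives."""

    def setup_listeners(self, bus) -> None:
        @bus.on(A2APollingStatusEvent)
        def on_poll(_source, event: A2APollingStatusEvent) -> None:
            # Same fields the console formatter renders in its panels.
            print(f"poll #{event.poll_count}: {event.state} after {event.elapsed_seconds:.1f}s")

        @bus.on(A2APushNotificationReceivedEvent)
        def on_push(_source, event: A2APushNotificationReceivedEvent) -> None:
            print(f"push for task {event.task_id}: {event.state}")
```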
@@ -114,7 +114,6 @@ To enable tracing, do any one of these:
New streaming sessions will be created on-demand when needed.
This method exists for API compatibility with HITL callers.
"""
pass

def print_panel(
self, content: Text, title: str, style: str = "blue", is_flow: bool = False
@@ -1417,3 +1416,49 @@ To enable tracing, do any one of these:
panel = self.create_panel(content, "❌ MCP Tool Failed", "red")
self.print(panel)
self.print()

def handle_a2a_polling_started(
self,
task_id: str,
polling_interval: float,
endpoint: str,
) -> None:
"""Handle A2A polling started event with panel display."""
content = Text()
content.append("A2A Polling Started\n", style="cyan bold")
content.append("Task ID: ", style="white")
content.append(f"{task_id[:8]}...\n", style="cyan")
content.append("Interval: ", style="white")
content.append(f"{polling_interval}s\n", style="cyan")

self.print_panel(content, "⏳ A2A Polling", "cyan")

def handle_a2a_polling_status(
self,
task_id: str,
state: str,
elapsed_seconds: float,
poll_count: int,
) -> None:
"""Handle A2A polling status event with panel display."""
if state == "completed":
style = "green"
status_indicator = "✓"
elif state == "failed":
style = "red"
status_indicator = "✗"
elif state == "working":
style = "yellow"
status_indicator = "⋯"
else:
style = "cyan"
status_indicator = "•"

content = Text()
content.append(f"Poll #{poll_count}\n", style=f"{style} bold")
content.append("Status: ", style="white")
content.append(f"{status_indicator} {state}\n", style=style)
content.append("Elapsed: ", style="white")
content.append(f"{elapsed_seconds:.1f}s\n", style=style)

self.print_panel(content, f"📊 A2A Poll #{poll_count}", style)
@@ -5,6 +5,7 @@ from crewai.flow.async_feedback import (
PendingFeedbackContext,
)
from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.flow.flow_config import flow_config
from crewai.flow.human_feedback import HumanFeedbackResult, human_feedback
from crewai.flow.persistence import persist
from crewai.flow.visualization import (
@@ -24,6 +25,7 @@ __all__ = [
"PendingFeedbackContext",
"and_",
"build_flow_structure",
"flow_config",
"human_feedback",
"listen",
"or_",
@@ -1203,7 +1203,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
result = self.kickoff(inputs=inputs)
result_holder.append(result)
except Exception as e:
signal_error(state, e)
# HumanFeedbackPending is expected control flow, not an error
from crewai.flow.async_feedback.types import HumanFeedbackPending

if isinstance(e, HumanFeedbackPending):
result_holder.append(e)
else:
signal_error(state, e)
finally:
self.stream = True
signal_end(state)
@@ -1258,7 +1264,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
result = await self.kickoff_async(inputs=inputs)
result_holder.append(result)
except Exception as e:
signal_error(state, e, is_async=True)
# HumanFeedbackPending is expected control flow, not an error
from crewai.flow.async_feedback.types import HumanFeedbackPending

if isinstance(e, HumanFeedbackPending):
result_holder.append(e)
else:
signal_error(state, e, is_async=True)
finally:
self.stream = True
signal_end(state, is_async=True)
@@ -1590,29 +1602,45 @@ class Flow(Generic[T], metaclass=FlowMeta):

return result
except Exception as e:
if not self.suppress_flow_events:
# Check if this is a HumanFeedbackPending exception (paused, not failed)
from crewai.flow.async_feedback.types import HumanFeedbackPending
# Check if this is a HumanFeedbackPending exception (paused, not failed)
from crewai.flow.async_feedback.types import HumanFeedbackPending

if isinstance(e, HumanFeedbackPending):
# Auto-save pending feedback (create default persistence if needed)
if self._persistence is None:
from crewai.flow.persistence import SQLiteFlowPersistence
if isinstance(e, HumanFeedbackPending):
# Auto-save pending feedback (create default persistence if needed)
if self._persistence is None:
from crewai.flow.persistence import SQLiteFlowPersistence

self._persistence = SQLiteFlowPersistence()
self._persistence = SQLiteFlowPersistence()

# Regular failure
future = crewai_event_bus.emit(
self,
MethodExecutionFailedEvent(
type="method_execution_failed",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
error=e,
),
)
if future:
self._event_futures.append(future)
# Emit paused event (not failed)
if not self.suppress_flow_events:
future = crewai_event_bus.emit(
self,
MethodExecutionPausedEvent(
type="method_execution_paused",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
state=self._copy_and_serialize_state(),
flow_id=e.context.flow_id,
message=e.context.message,
emit=e.context.emit,
),
)
if future:
self._event_futures.append(future)
elif not self.suppress_flow_events:
# Regular failure - emit failed event
future = crewai_event_bus.emit(
self,
MethodExecutionFailedEvent(
type="method_execution_failed",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
error=e,
),
)
if future:
self._event_futures.append(future)
raise e

def _copy_and_serialize_state(self) -> dict[str, Any]:
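The recurring shape in these hunks is worth calling out: `HumanFeedbackPending` is raised to suspend a flow, so every generic `except Exception` block now has to branch before treating the exception as a failure. A stripped-down sketch of that control-flow split; the `HumanFeedbackPending` stand-in and `run_step` helper are illustrative, not crewAI API:

```python
class HumanFeedbackPending(Exception):
    """Stand-in for crewai.flow.async_feedback.types.HumanFeedbackPending."""


def run_step(step):
    try:
        return step()
    except Exception as e:
        if isinstance(e, HumanFeedbackPending):
            return e  # expected control flow: surface the pause to the caller
        raise         # everything else is a real failure
```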
lib/crewai/src/crewai/flow/flow_config.py (new file, 39 lines)
@@ -0,0 +1,39 @@
"""Global Flow configuration.
|
||||
|
||||
This module provides a singleton configuration object that can be used to
|
||||
customize Flow behavior at runtime.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.flow.async_feedback.types import HumanFeedbackProvider
|
||||
|
||||
|
||||
class FlowConfig:
|
||||
"""Global configuration for Flow execution.
|
||||
|
||||
Attributes:
|
||||
hitl_provider: The human-in-the-loop feedback provider.
|
||||
Defaults to None (uses console input).
|
||||
Can be overridden by deployments at startup.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._hitl_provider: HumanFeedbackProvider | None = None
|
||||
|
||||
@property
|
||||
def hitl_provider(self) -> Any:
|
||||
"""Get the configured HITL provider."""
|
||||
return self._hitl_provider
|
||||
|
||||
@hitl_provider.setter
|
||||
def hitl_provider(self, provider: Any) -> None:
|
||||
"""Set the HITL provider."""
|
||||
self._hitl_provider = provider
|
||||
|
||||
|
||||
# Singleton instance
|
||||
flow_config = FlowConfig()
|
||||
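As the docstring says, the singleton exists so a deployment can swap the feedback channel once at startup instead of threading a provider through every `human_feedback` call. A minimal sketch, assuming a hypothetical `SlackFeedbackProvider` that implements the provider's `request_feedback` method:

```python
from crewai.flow.flow_config import flow_config


class SlackFeedbackProvider:  # hypothetical provider, not part of crewAI
    def request_feedback(self, context, flow_instance):
        # Post context.message to Slack, then either block until a reply
        # arrives and return it, or raise HumanFeedbackPending to pause.
        ...


flow_config.hitl_provider = SlackFeedbackProvider()  # applies to all flows from here on
```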
@@ -283,11 +283,18 @@ def human_feedback(
llm=llm if isinstance(llm, str) else None,
)

if provider is not None:
# Use custom provider (may raise HumanFeedbackPending)
return provider.request_feedback(context, flow_instance)
# Determine effective provider:
effective_provider = provider
if effective_provider is None:
from crewai.flow.flow_config import flow_config

effective_provider = flow_config.hitl_provider

if effective_provider is not None:
# Use provider (may raise HumanFeedbackPending for async providers)
return effective_provider.request_feedback(context, flow_instance)
else:
# Use default console input
# Use default console input (local development)
return flow_instance._request_human_feedback(
message=message,
output=method_output,
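The rewritten lookup gives a clear precedence order: an explicit `provider` argument wins, then the global `flow_config.hitl_provider`, and only then the console fallback. A tiny sketch that mirrors the same resolution (the helper name is illustrative):

```python
def resolve_provider(call_provider, global_provider):
    """Per-call argument first, then global config; None means console input."""
    return call_provider if call_provider is not None else global_provider
```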
@@ -925,11 +925,12 @@ class LLM(BaseLLM):
except Exception as e:
logging.debug(f"Error checking for tool calls: {e}")

# Track token usage and log callbacks if available in streaming mode
if usage_info:
self._track_token_usage_internal(usage_info)
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

if not tool_calls or not available_functions:
# Track token usage and log callbacks if available in streaming mode
if usage_info:
self._track_token_usage_internal(usage_info)
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

if response_model and self.is_litellm:
instructor_instance = InternalInstructor(
@@ -962,12 +963,7 @@ class LLM(BaseLLM):
if tool_result is not None:
return tool_result

# --- 10) Track token usage and log callbacks if available in streaming mode
if usage_info:
self._track_token_usage_internal(usage_info)
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

# --- 11) Emit completion event and return response
# --- 10) Emit completion event and return response
self._handle_emit_call_events(
response=full_response,
call_type=LLMCallType.LLM_CALL,
@@ -1148,6 +1144,10 @@ class LLM(BaseLLM):
if response_model:
params["response_model"] = response_model
response = litellm.completion(**params)

if hasattr(response,"usage") and not isinstance(response.usage, type) and response.usage:
usage_info = response.usage
self._track_token_usage_internal(usage_info)

except ContextWindowExceededError as e:
# Convert litellm's context window error to our own exception type
@@ -1273,6 +1273,10 @@ class LLM(BaseLLM):
params["response_model"] = response_model
response = await litellm.acompletion(**params)

if hasattr(response,"usage") and not isinstance(response.usage, type) and response.usage:
usage_info = response.usage
self._track_token_usage_internal(usage_info)

except ContextWindowExceededError as e:
raise LLMContextLengthExceededError(str(e)) from e

@@ -1359,6 +1363,7 @@ class LLM(BaseLLM):
"""
full_response = ""
chunk_count = 0

usage_info = None

accumulated_tool_args: defaultdict[int, AccumulatedToolArgs] = defaultdict(
@@ -1444,6 +1449,9 @@ class LLM(BaseLLM):
end_time=0,
)

if usage_info:
self._track_token_usage_internal(usage_info)

if accumulated_tool_args and available_functions:
# Convert accumulated tool args to ChatCompletionDeltaToolCall objects
tool_calls_list: list[ChatCompletionDeltaToolCall] = [
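The new `not isinstance(response.usage, type)` guard reads oddly at first: it filters out the case where `usage` is still a bare class object (a placeholder attribute) rather than a populated usage instance, so only real usage data is tracked. A small self-contained sketch of why the extra check matters; the `Resp*` classes are illustrative, not litellm types:

```python
class Usage:
    total_tokens = 42


class RespWithPlaceholder:
    usage = Usage    # class object left as a placeholder -> must be skipped


class RespWithUsage:
    usage = Usage()  # real instance -> safe to track


def maybe_track(response):
    if hasattr(response, "usage") and not isinstance(response.usage, type) and response.usage:
        return response.usage
    return None


assert maybe_track(RespWithPlaceholder()) is None
assert maybe_track(RespWithUsage()) is not None
```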
@@ -969,3 +969,35 @@ class Telemetry:
close_span(span)

self._safe_telemetry_operation(_operation)

def human_feedback_span(
self,
event_type: str,
has_routing: bool,
num_outcomes: int = 0,
feedback_provided: bool | None = None,
outcome: str | None = None,
) -> None:
"""Records human feedback feature usage.

Args:
event_type: Type of event - "requested" or "received".
has_routing: Whether emit options were configured for routing.
num_outcomes: Number of possible outcomes if routing is used.
feedback_provided: Whether user provided feedback or skipped (None if requested).
outcome: The collapsed outcome string if routing was used.
"""

def _operation() -> None:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Human Feedback")
self._add_attribute(span, "event_type", event_type)
self._add_attribute(span, "has_routing", has_routing)
self._add_attribute(span, "num_outcomes", num_outcomes)
if feedback_provided is not None:
self._add_attribute(span, "feedback_provided", feedback_provided)
if outcome is not None:
self._add_attribute(span, "outcome", outcome)
close_span(span)

self._safe_telemetry_operation(_operation)
lib/crewai/tests/a2a/test_a2a_integration.py (new file, 323 lines)
@@ -0,0 +1,323 @@
from __future__ import annotations

import os
import uuid

import pytest
import pytest_asyncio

from a2a.client import ClientFactory
from a2a.types import AgentCard, Message, Part, Role, TaskState, TextPart

from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.streaming.handler import StreamingHandler


A2A_TEST_ENDPOINT = os.getenv("A2A_TEST_ENDPOINT", "http://localhost:9999")


@pytest_asyncio.fixture
async def a2a_client():
"""Create A2A client for test server."""
client = await ClientFactory.connect(A2A_TEST_ENDPOINT)
yield client
await client.close()


@pytest.fixture
def test_message() -> Message:
"""Create a simple test message."""
return Message(
role=Role.user,
parts=[Part(root=TextPart(text="What is 2 + 2?"))],
message_id=str(uuid.uuid4()),
)


@pytest_asyncio.fixture
async def agent_card(a2a_client) -> AgentCard:
"""Fetch the real agent card from the server."""
return await a2a_client.get_card()


class TestA2AAgentCardFetching:
"""Integration tests for agent card fetching."""

@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_fetch_agent_card(self, a2a_client) -> None:
"""Test fetching an agent card from the server."""
card = await a2a_client.get_card()

assert card is not None
assert card.name == "GPT Assistant"
assert card.url is not None
assert card.capabilities is not None
assert card.capabilities.streaming is True


class TestA2APollingIntegration:
"""Integration tests for A2A polling handler."""

@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_polling_completes_task(
self,
a2a_client,
test_message: Message,
agent_card: AgentCard,
) -> None:
"""Test that polling handler completes a task successfully."""
new_messages: list[Message] = []

result = await PollingHandler.execute(
client=a2a_client,
message=test_message,
new_messages=new_messages,
agent_card=agent_card,
polling_interval=0.5,
polling_timeout=30.0,
)

assert isinstance(result, dict)
assert result["status"] == TaskState.completed
assert result.get("result") is not None
assert "4" in result["result"]


class TestA2AStreamingIntegration:
"""Integration tests for A2A streaming handler."""

@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_streaming_completes_task(
self,
a2a_client,
test_message: Message,
agent_card: AgentCard,
) -> None:
"""Test that streaming handler completes a task successfully."""
new_messages: list[Message] = []

result = await StreamingHandler.execute(
client=a2a_client,
message=test_message,
new_messages=new_messages,
agent_card=agent_card,
)

assert isinstance(result, dict)
assert result["status"] == TaskState.completed
assert result.get("result") is not None


class TestA2ATaskOperations:
"""Integration tests for task operations."""

@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_send_message_and_get_response(
self,
a2a_client,
test_message: Message,
) -> None:
"""Test sending a message and getting a response."""
from a2a.types import Task

final_task: Task | None = None
async for event in a2a_client.send_message(test_message):
if isinstance(event, tuple) and len(event) >= 1:
task, _ = event
if isinstance(task, Task):
final_task = task

assert final_task is not None
assert final_task.id is not None
assert final_task.status is not None
assert final_task.status.state == TaskState.completed


class TestA2APushNotificationHandler:
"""Tests for push notification handler.

These tests use mocks for the result store since webhook callbacks
are incoming requests that can't be recorded with VCR.
"""

@pytest.fixture
def mock_agent_card(self) -> AgentCard:
"""Create a minimal valid agent card for testing."""
from a2a.types import AgentCapabilities

return AgentCard(
name="Test Agent",
description="Test agent for push notification tests",
url="http://localhost:9999",
version="1.0.0",
capabilities=AgentCapabilities(streaming=True, push_notifications=True),
default_input_modes=["text"],
default_output_modes=["text"],
skills=[],
)

@pytest.fixture
def mock_task(self) -> "Task":
"""Create a minimal valid task for testing."""
from a2a.types import Task, TaskStatus

return Task(
id="task-123",
context_id="ctx-123",
status=TaskStatus(state=TaskState.working),
)

@pytest.mark.asyncio
async def test_push_handler_waits_for_result(
self,
mock_agent_card: AgentCard,
mock_task,
) -> None:
"""Test that push handler waits for result from store."""
from unittest.mock import AsyncMock, MagicMock

from a2a.types import Task, TaskStatus
from pydantic import AnyHttpUrl

from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

completed_task = Task(
id="task-123",
context_id="ctx-123",
status=TaskStatus(state=TaskState.completed),
history=[],
)

mock_store = MagicMock()
mock_store.wait_for_result = AsyncMock(return_value=completed_task)

async def mock_send_message(*args, **kwargs):
yield (mock_task, None)

mock_client = MagicMock()
mock_client.send_message = mock_send_message

config = PushNotificationConfig(
url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
token="secret-token",
result_store=mock_store,
)

test_msg = Message(
role=Role.user,
parts=[Part(root=TextPart(text="What is 2+2?"))],
message_id="msg-001",
)

new_messages: list[Message] = []

result = await PushNotificationHandler.execute(
client=mock_client,
message=test_msg,
new_messages=new_messages,
agent_card=mock_agent_card,
config=config,
result_store=mock_store,
polling_timeout=30.0,
polling_interval=1.0,
)

mock_store.wait_for_result.assert_called_once_with(
task_id="task-123",
timeout=30.0,
poll_interval=1.0,
)

assert result["status"] == TaskState.completed

@pytest.mark.asyncio
async def test_push_handler_returns_failure_on_timeout(
self,
mock_agent_card: AgentCard,
) -> None:
"""Test that push handler returns failure when result store times out."""
from unittest.mock import AsyncMock, MagicMock

from a2a.types import Task, TaskStatus
from pydantic import AnyHttpUrl

from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

mock_store = MagicMock()
mock_store.wait_for_result = AsyncMock(return_value=None)

working_task = Task(
id="task-456",
context_id="ctx-456",
status=TaskStatus(state=TaskState.working),
)

async def mock_send_message(*args, **kwargs):
yield (working_task, None)

mock_client = MagicMock()
mock_client.send_message = mock_send_message

config = PushNotificationConfig(
url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
token="token",
result_store=mock_store,
)

test_msg = Message(
role=Role.user,
parts=[Part(root=TextPart(text="test"))],
message_id="msg-002",
)

new_messages: list[Message] = []

result = await PushNotificationHandler.execute(
client=mock_client,
message=test_msg,
new_messages=new_messages,
agent_card=mock_agent_card,
config=config,
result_store=mock_store,
polling_timeout=5.0,
polling_interval=0.5,
)

assert result["status"] == TaskState.failed
assert "timeout" in result.get("error", "").lower()

@pytest.mark.asyncio
async def test_push_handler_requires_config(
self,
mock_agent_card: AgentCard,
) -> None:
"""Test that push handler fails gracefully without config."""
from unittest.mock import MagicMock

from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

mock_client = MagicMock()

test_msg = Message(
role=Role.user,
parts=[Part(root=TextPart(text="test"))],
message_id="msg-003",
)

new_messages: list[Message] = []

result = await PushNotificationHandler.execute(
client=mock_client,
message=test_msg,
new_messages=new_messages,
agent_card=mock_agent_card,
)

assert result["status"] == TaskState.failed
assert "config" in result.get("error", "").lower()
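These tests default to a local agent on port 9999 and replay from the VCR cassettes that follow, so they can run offline; pointing `A2A_TEST_ENDPOINT` at a live server lets them be re-recorded. A hedged invocation sketch (assumes the pytest-recording plugin, which provides `--record-mode`):

```python
# Replay from cassettes (no server needed):
#   pytest lib/crewai/tests/a2a/test_a2a_integration.py
#
# Re-record against a live agent:
#   A2A_TEST_ENDPOINT=http://localhost:9999 \
#     pytest --record-mode=rewrite lib/crewai/tests/a2a/test_a2a_integration.py
```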
@@ -0,0 +1,44 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1
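Each cassette pairs recorded requests with their responses; on replay, `@pytest.mark.vcr()` matches outgoing calls against these entries instead of hitting the network. Values like `X-USER-AGENT-XXX` and `ACCEPT-ENCODING-XXX` are the sanitized placeholders produced by the cassette's header filtering, so nothing environment-specific leaks into the fixtures. A hedged sketch of the usual pytest-recording hook for such filtering; the repo's actual conftest may differ:

```python
import pytest


@pytest.fixture(scope="module")
def vcr_config():
    # vcrpy accepts (header_name, replacement) pairs.
    return {"filter_headers": [("authorization", "AUTHORIZATION-XXX")]}
```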
@@ -0,0 +1,126 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:16:58 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"e1e63c75-3ea0-49fb-b512-5128a2476416","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\ndata:
{\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\ndata:
{\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"54bb7ff3-f2c0-4eb3-b427-bf1c8cf90832\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:16:58 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"id":"cb1e4af3-d2d0-4848-96b8-7082ee6171d1","jsonrpc":"2.0","method":"tasks/get","params":{"historyLength":100,"id":"0dd4d3af-f35d-409d-9462-01218e5641f9"}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
- '157'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: '{"id":"cb1e4af3-d2d0-4848-96b8-7082ee6171d1","jsonrpc":"2.0","result":{"contextId":"b9e14c1b-734d-4d1e-864a-e6dda5231d71","history":[{"contextId":"b9e14c1b-734d-4d1e-864a-e6dda5231d71","kind":"message","messageId":"e1e63c75-3ea0-49fb-b512-5128a2476416","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user","taskId":"0dd4d3af-f35d-409d-9462-01218e5641f9"}],"id":"0dd4d3af-f35d-409d-9462-01218e5641f9","kind":"task","status":{"message":{"kind":"message","messageId":"54bb7ff3-f2c0-4eb3-b427-bf1c8cf90832","parts":[{"kind":"text","text":"\n[Tool:
calculator] 2 + 2 = 4\n2 + 2 equals 4."}],"role":"agent"},"state":"completed"}}}'
headers:
content-length:
- '635'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,90 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:02 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"8cf25b61-8884-4246-adce-fccb32e176ab","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"c145297f-7331-4835-adcc-66b51de92a2b","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\ndata:
{\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\ndata:
{\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"25f81e3c-b7e8-48b5-a98a-4066f3637a13\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:17:02 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,90 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"3a17c6bf-8db6-45a6-8535-34c45c0c4936","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"712558a3-6d92-4591-be8a-9dd8566dde82","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\ndata:
{\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\ndata:
{\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"916324aa-fd25-4849-bceb-c4644e2fcbb0\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,113 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '90'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CvErx9mbnUKFHKkhPChO93eUzKJqy\",\n \"object\":
\"chat.completion\",\n \"created\": 1767757889,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Why did the scarecrow win an award?
\\n\\nBecause he was outstanding in his field!\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 12,\n \"completion_tokens\":
18,\n \"total_tokens\": 30,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
Access-Control-Expose-Headers:
- ACCESS-CONTROL-XXX
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 07 Jan 2026 03:51:29 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '887'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '466'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '483'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,113 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '90'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-CugAsv9iAHdiGddGDHcZWEp7ZV7cB\",\n \"object\":
\"chat.completion\",\n \"created\": 1767624522,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Why don't skeletons fight each other?
\\n\\nThey don't have the guts!\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 12,\n \"completion_tokens\":
15,\n \"total_tokens\": 27,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_29330a9688\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Mon, 05 Jan 2026 14:48:43 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '874'
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '424'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1017'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,179 @@
interactions:
- request:
body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[],"stream":true,"stream_options":{"include_usage":true}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '144'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 2.14.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.14
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"k9LESwMhk"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Why"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"tYMBX9z8"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
did"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"X5lpC48"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Ns5pnmO"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
scare"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"cUTYl"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"crow"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ZvHPszH"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
win"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"pLKQ5rM"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
an"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Yl8vxgvM"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
award"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"xfxd0"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"SFxdiZP3Uh"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
\n\n"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Sysruv"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Because"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"OeZH"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
he"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"epBJpPYm"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
was"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"5Bofkug"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
outstanding"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"ylIDIBTCqSLy3tA"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"lLi2lQc4"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
his"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"fi47Jij"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"
field"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Kkiyw"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"RMcUfqa93e"}

data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"rAtJI"}
||||
data: {"id":"chatcmpl-CvF96exJN1ZmQQ0zfOWhGs2UqetwZ","object":"chat.completion.chunk","created":1767758952,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[],"usage":{"prompt_tokens":12,"completion_tokens":18,"total_tokens":30,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"aTyTRaiahL"}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
    headers:
      Access-Control-Expose-Headers:
      - ACCESS-CONTROL-XXX
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Wed, 07 Jan 2026 04:09:13 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '243'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '645'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
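The cassette above records a complete SSE chat-completion stream: each `data:` line carries one delta chunk, and the final chunks deliver the `finish_reason` and aggregate `usage`. For reference, here is a minimal client-side sketch of how such a stream is typically consumed; this is illustrative only, not code from this diff:

```python
import json

def iter_content(sse_lines):
    """Yield assistant text deltas from raw 'data: ...' SSE lines."""
    for line in sse_lines:
        if not line.startswith("data: "):
            continue  # skip blank separators between events
        payload = line[len("data: "):]
        if payload == "[DONE]":  # sentinel that ends the stream
            break
        chunk = json.loads(payload)
        for choice in chunk.get("choices", []):
            piece = choice.get("delta", {}).get("content")
            if piece:
                yield piece

# Joining the deltas recorded above reproduces the full completion:
# "Why did the scarecrow win an award? \n\nBecause he was outstanding in his field!"
```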
@@ -0,0 +1,179 @@
interactions:
- request:
    body: '{"messages":[{"role":"user","content":"Tell me a joke."}],"model":"gpt-4o-mini","stop":[],"stream":true,"stream_options":{"include_usage":true}}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '144'
      content-type:
      - application/json
      cookie:
      - COOKIE-XXX
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 2.14.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.14
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"SVnFynat2"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Why"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"M0Y4Qurw"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" did"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"LknkzkM"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" the"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"45ePnqI"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" scare"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"DsJ1r"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"crow"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"9sXjMg0"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" win"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"UlTRXCu"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" an"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"He218dPh"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" award"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"CO1Dc"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"nHS3XxEjuW"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" \n\n"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"IhBQDR"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"Because"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"TJzX"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" he"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"AjRyStfn"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" was"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"2AZtzyA"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" outstanding"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"XfziOItr8wziIap"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"7hXp54s6"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" his"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"RPmgnK3"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":" field"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"uqtNk"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null,"obfuscation":"Wziup4uj7N"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null,"obfuscation":"q9paY"}

        data: {"id":"chatcmpl-CugAuE9ctOkFjqIbmxWZpxeNX7gWt","object":"chat.completion.chunk","created":1767624524,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_29330a9688","choices":[],"usage":{"prompt_tokens":12,"completion_tokens":18,"total_tokens":30,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}},"obfuscation":"TWmOWpZx0s"}

        data: [DONE]

        '
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 05 Jan 2026 14:48:44 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '227'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '645'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
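Both recordings follow the standard VCR cassette layout (`interactions` holding `request`/`response` pairs, plus a top-level `version`). A quick way to sanity-check a cassette by hand might look like the sketch below; the file path is hypothetical (real cassettes live in the test suite's cassette directory) and PyYAML is assumed to be installed:

```python
import yaml  # PyYAML, assumed available in the dev environment

# Hypothetical path; substitute the cassette you want to inspect.
with open("cassettes/test_usage_info_streaming_with_call.yaml") as f:
    cassette = yaml.safe_load(f)

for interaction in cassette["interactions"]:
    request, response = interaction["request"], interaction["response"]
    print(request["method"], request["uri"], "->", response["status"]["code"])
```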
@@ -877,3 +877,116 @@ def test_validate_model_in_constants():
        LLM._validate_model_in_constants("anthropic.claude-future-v1:0", "bedrock")
        is True
    )


@pytest.mark.vcr(record_mode="once", decode_compressed_response=True)
def test_usage_info_non_streaming_with_call():
    llm = LLM(model="gpt-4o-mini", is_litellm=True)
    assert llm._token_usage == {
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "successful_requests": 0,
        "cached_prompt_tokens": 0,
    }
    assert llm.stream is False

    with patch.object(
        llm, "_handle_non_streaming_response", wraps=llm._handle_non_streaming_response
    ) as mock_handle:
        llm.call("Tell me a joke.")
        mock_handle.assert_called_once()

    assert llm._token_usage["total_tokens"] > 0
    assert llm._token_usage["prompt_tokens"] > 0
    assert llm._token_usage["completion_tokens"] > 0
    assert llm._token_usage["successful_requests"] == 1


@pytest.mark.vcr(record_mode="once", decode_compressed_response=True)
def test_usage_info_streaming_with_call():
    llm = LLM(model="gpt-4o-mini", is_litellm=True, stream=True)
    assert llm._token_usage == {
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "successful_requests": 0,
        "cached_prompt_tokens": 0,
    }
    assert llm.stream is True

    with patch.object(
        llm, "_handle_streaming_response", wraps=llm._handle_streaming_response
    ) as mock_handle:
        llm.call("Tell me a joke.")
        mock_handle.assert_called_once()

    assert llm._token_usage["total_tokens"] > 0
    assert llm._token_usage["prompt_tokens"] > 0
    assert llm._token_usage["completion_tokens"] > 0
    assert llm._token_usage["successful_requests"] == 1


@pytest.mark.asyncio
@pytest.mark.vcr(record_mode="once", decode_compressed_response=True, match_on=["method", "scheme", "host", "path", "body"])
async def test_usage_info_non_streaming_with_acall():
    llm = LLM(
        model="openai/gpt-4o-mini",
        is_litellm=True,
        stream=False,
    )

    # sanity check
    assert llm._token_usage == {
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "successful_requests": 0,
        "cached_prompt_tokens": 0,
    }

    with patch.object(
        llm, "_ahandle_non_streaming_response", wraps=llm._ahandle_non_streaming_response
    ) as mock_handle:
        result = await llm.acall("Tell me a joke.")
        mock_handle.assert_called_once()

    # token usage assertions
    assert llm._token_usage["successful_requests"] == 1
    assert llm._token_usage["prompt_tokens"] > 0
    assert llm._token_usage["completion_tokens"] > 0
    assert llm._token_usage["total_tokens"] > 0

    assert len(result) > 0


@pytest.mark.asyncio
@pytest.mark.vcr(record_mode="none", decode_compressed_response=True, match_on=["method", "scheme", "host", "path", "body"])
async def test_usage_info_streaming_with_acall():
    llm = LLM(
        model="gpt-4o-mini",
        is_litellm=True,
        stream=True,
    )

    assert llm.stream is True
    assert llm._token_usage == {
        "total_tokens": 0,
        "prompt_tokens": 0,
        "completion_tokens": 0,
        "successful_requests": 0,
        "cached_prompt_tokens": 0,
    }

    with patch.object(
        llm, "_ahandle_streaming_response", wraps=llm._ahandle_streaming_response
    ) as mock_handle:
        result = await llm.acall("Tell me a joke.")
        mock_handle.assert_called_once()

    assert llm._token_usage["successful_requests"] == 1
    assert llm._token_usage["prompt_tokens"] > 0
    assert llm._token_usage["completion_tokens"] > 0
    assert llm._token_usage["total_tokens"] > 0

    assert len(result) > 0
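The four tests above all assert the same contract: `_token_usage` starts zeroed and is populated after exactly one sync or async call, streamed or not. A minimal sketch of that contract outside the test harness, reusing only names that appear in the tests (run it under the VCR fixture, otherwise it will hit the live API):

```python
from crewai.llm import LLM

llm = LLM(model="gpt-4o-mini", is_litellm=True, stream=True)
llm.call("Tell me a joke.")  # replayed from the cassette when run under VCR

usage = llm._token_usage  # private counter the tests above inspect
assert usage["successful_requests"] == 1
print(usage["prompt_tokens"], usage["completion_tokens"], usage["total_tokens"])
```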
@@ -25,6 +25,8 @@ from crewai.events.types.flow_events import (
    FlowCreatedEvent,
    FlowFinishedEvent,
    FlowStartedEvent,
    HumanFeedbackReceivedEvent,
    HumanFeedbackRequestedEvent,
    MethodExecutionFailedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
@@ -45,6 +47,7 @@ from crewai.events.types.tool_usage_events import (
    ToolUsageFinishedEvent,
)
from crewai.flow.flow import Flow, listen, start
from crewai.flow.human_feedback import human_feedback
from crewai.llm import LLM
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
@@ -1273,3 +1276,135 @@ def test_llm_emits_event_with_lite_agent():

    assert set(all_agent_roles) == {agent.role}
    assert set(all_agent_id) == {str(agent.id)}


# ----------- HUMAN FEEDBACK EVENTS -----------


@patch("builtins.input", return_value="looks good")
@patch("builtins.print")
def test_human_feedback_emits_requested_and_received_events(mock_print, mock_input):
    """Test that @human_feedback decorator emits HumanFeedbackRequested and Received events."""
    requested_events = []
    received_events = []
    events_received = threading.Event()

    @crewai_event_bus.on(HumanFeedbackRequestedEvent)
    def handle_requested(source, event):
        requested_events.append(event)

    @crewai_event_bus.on(HumanFeedbackReceivedEvent)
    def handle_received(source, event):
        received_events.append(event)
        events_received.set()

    class TestFlow(Flow):
        @start()
        @human_feedback(
            message="Review:",
            emit=["approved", "rejected"],
            llm="gpt-4o-mini",
        )
        def review(self):
            return "test content"

    flow = TestFlow()

    with patch.object(flow, "_collapse_to_outcome", return_value="approved"):
        flow.kickoff()

    assert events_received.wait(timeout=5), (
        "Timeout waiting for human feedback events"
    )

    assert len(requested_events) == 1
    assert requested_events[0].type == "human_feedback_requested"
    assert requested_events[0].emit == ["approved", "rejected"]
    assert requested_events[0].message == "Review:"
    assert requested_events[0].output == "test content"

    assert len(received_events) == 1
    assert received_events[0].type == "human_feedback_received"
    assert received_events[0].feedback == "looks good"
    assert received_events[0].outcome is None

    assert flow.last_human_feedback is not None
    assert flow.last_human_feedback.outcome == "approved"


@patch("builtins.input", return_value="feedback text")
@patch("builtins.print")
def test_human_feedback_without_routing_emits_events(mock_print, mock_input):
    """Test that @human_feedback without emit still emits events."""
    requested_events = []
    received_events = []
    events_received = threading.Event()

    @crewai_event_bus.on(HumanFeedbackRequestedEvent)
    def handle_requested(source, event):
        requested_events.append(event)

    @crewai_event_bus.on(HumanFeedbackReceivedEvent)
    def handle_received(source, event):
        received_events.append(event)
        events_received.set()

    class SimpleFlow(Flow):
        @start()
        @human_feedback(message="Please review:")
        def review(self):
            return "content to review"

    flow = SimpleFlow()
    flow.kickoff()

    assert events_received.wait(timeout=5), (
        "Timeout waiting for human feedback events"
    )

    assert len(requested_events) == 1
    assert requested_events[0].emit is None

    assert len(received_events) == 1
    assert received_events[0].feedback == "feedback text"
    assert received_events[0].outcome is None


@patch("builtins.input", return_value="")
@patch("builtins.print")
def test_human_feedback_empty_feedback_emits_events(mock_print, mock_input):
    """Test that empty feedback (skipped) still emits events correctly."""
    received_events = []
    events_received = threading.Event()

    @crewai_event_bus.on(HumanFeedbackReceivedEvent)
    def handle_received(source, event):
        received_events.append(event)
        events_received.set()

    class SkipFlow(Flow):
        @start()
        @human_feedback(
            message="Review:",
            emit=["approved", "rejected"],
            llm="gpt-4o-mini",
            default_outcome="rejected",
        )
        def review(self):
            return "content"

    flow = SkipFlow()
    flow.kickoff()

    assert events_received.wait(timeout=5), (
        "Timeout waiting for human feedback events"
    )

    assert len(received_events) == 1
    assert received_events[0].feedback == ""
    assert received_events[0].outcome is None

    assert flow.last_human_feedback is not None
    assert flow.last_human_feedback.outcome == "rejected"
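For context on what these tests exercise: `@human_feedback` wraps a flow method, prompts via `input()`, and either records the raw feedback or collapses it into one of the `emit` outcomes (falling back to `default_outcome` on empty input). Below is a hedged sketch of the same pattern outside pytest; the flow and the stubbed input are illustrative, and the private `_collapse_to_outcome` is patched the same way the first test above does to avoid a live LLM round-trip:

```python
from unittest.mock import patch

from crewai.flow.flow import Flow, start
from crewai.flow.human_feedback import human_feedback


class ReviewFlow(Flow):
    @start()
    @human_feedback(
        message="Review:",              # prompt shown to the human
        emit=["approved", "rejected"],  # outcomes feedback is collapsed into
        llm="gpt-4o-mini",
        default_outcome="rejected",     # used when feedback is left empty
    )
    def review(self):
        return "content to review"


flow = ReviewFlow()
# Stub stdin so kickoff() does not block, and skip the LLM call that would
# otherwise classify the feedback into an outcome.
with patch("builtins.input", return_value="ship it"), patch.object(
    flow, "_collapse_to_outcome", return_value="approved"
):
    flow.kickoff()

print(flow.last_human_feedback.outcome)  # -> "approved"
```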
@@ -1,3 +1,3 @@
"""CrewAI development tools."""

-__version__ = "1.7.2"
+__version__ = "1.8.0"