refactor: extract a2a into standalone crewai-a2a package

Move crewai.a2a into lib/crewai-a2a as its own workspace package, importable as crewai_a2a. The crewai[a2a] extra now pulls in crewai-a2a, which owns a2a-sdk, httpx-auth, httpx-sse, and aiocache.

crewai.a2a stays importable. Its __init__ is a compat shim that installs a meta-path finder routing crewai.a2a.* to crewai_a2a.*, so existing user code keeps working untouched.

a2a tests and cassettes moved alongside the package under lib/crewai-a2a/tests/. Added that path to the mypy and ruff per-file-ignores lists to match the other test dirs.
This commit is contained in:
Greyson LaLonde
2026-04-14 22:21:31 +08:00
parent 0dba95e166
commit 080c22678a
87 changed files with 415 additions and 321 deletions

View File

@@ -99,10 +99,7 @@ anthropic = [
"anthropic~=0.73.0",
]
a2a = [
"a2a-sdk~=0.3.10",
"httpx-auth~=0.23.1",
"httpx-sse~=0.4.0",
"aiocache[redis,memcached]~=0.12.3",
"crewai-a2a",
]
file-processing = [
"crewai-files",
@@ -140,6 +137,7 @@ torchvision = [
{ index = "pytorch", marker = "python_version < '3.13'" },
]
crewai-files = { workspace = true }
crewai-a2a = { workspace = true }
[build-system]

View File

@@ -1,10 +1,57 @@
"""Agent-to-Agent (A2A) protocol communication module for CrewAI."""
"""Compatibility shim: ``crewai.a2a`` re-exports :mod:`crewai_a2a`.
from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
The package lives in the ``crewai-a2a`` distribution (install via the
``crewai[a2a]`` extra). This module aliases the old import path so existing
code using ``crewai.a2a.*`` keeps working.
"""
from __future__ import annotations
from collections.abc import Sequence
import importlib
from importlib.abc import Loader, MetaPathFinder
from importlib.machinery import ModuleSpec
import sys
from types import ModuleType
# Static fallback export list; superseded at the bottom of this module by the
# dynamic list mirrored from ``crewai_a2a.__all__`` once the import succeeds.
__all__ = [
    "A2AClientConfig",
    "A2AConfig",
    "A2AServerConfig",
]

# Fail fast with an actionable message when the standalone distribution that
# now owns the implementation is not installed.
try:
    import crewai_a2a as _crewai_a2a
except ImportError as exc:
    raise ImportError(
        "crewai.a2a requires the 'crewai-a2a' package. "
        "Install it with: pip install 'crewai[a2a]'"
    ) from exc
class _A2AAliasFinder(MetaPathFinder, Loader):
_SRC = "crewai.a2a"
_DST = "crewai_a2a"
def find_spec(
self,
fullname: str,
path: Sequence[str] | None = None,
target: ModuleType | None = None,
) -> ModuleSpec | None:
if fullname == self._SRC or fullname.startswith(self._SRC + "."):
return ModuleSpec(fullname, self)
return None
def create_module(self, spec: ModuleSpec) -> ModuleType:
target = self._DST + spec.name[len(self._SRC) :]
module = importlib.import_module(target)
sys.modules[spec.name] = module
return module
def exec_module(self, module: ModuleType) -> None:
return None
if not any(isinstance(f, _A2AAliasFinder) for f in sys.meta_path):
sys.meta_path.insert(0, _A2AAliasFinder())
# Mirror the public API of crewai_a2a onto this module so that
# ``from crewai.a2a import X`` keeps resolving for every exported name.
_exported = list(getattr(_crewai_a2a, "__all__", []))
for _name in _exported:
    globals()[_name] = getattr(_crewai_a2a, _name)
__all__ = _exported

View File

@@ -1,38 +0,0 @@
"""A2A authentication schemas."""
from crewai.a2a.auth.client_schemes import (
APIKeyAuth,
AuthScheme,
BearerTokenAuth,
ClientAuthScheme,
HTTPBasicAuth,
HTTPDigestAuth,
OAuth2AuthorizationCode,
OAuth2ClientCredentials,
TLSConfig,
)
from crewai.a2a.auth.server_schemes import (
AuthenticatedUser,
EnterpriseTokenAuth,
OIDCAuth,
ServerAuthScheme,
SimpleTokenAuth,
)
# Public API of crewai.a2a.auth: client-side schemes (apply credentials to
# outgoing requests) and server-side schemes (validate incoming tokens),
# kept in alphabetical order.
__all__ = [
    "APIKeyAuth",
    "AuthScheme",
    "AuthenticatedUser",
    "BearerTokenAuth",
    "ClientAuthScheme",
    "EnterpriseTokenAuth",
    "HTTPBasicAuth",
    "HTTPDigestAuth",
    "OAuth2AuthorizationCode",
    "OAuth2ClientCredentials",
    "OIDCAuth",
    "ServerAuthScheme",
    "SimpleTokenAuth",
    "TLSConfig",
]

View File

@@ -1,550 +0,0 @@
"""Authentication schemes for A2A protocol clients.
Supported authentication methods:
- Bearer tokens
- OAuth2 (Client Credentials, Authorization Code)
- API Keys (header, query, cookie)
- HTTP Basic authentication
- HTTP Digest authentication
- mTLS (mutual TLS) client certificate authentication
"""
from __future__ import annotations
from abc import ABC, abstractmethod
import asyncio
import base64
from collections.abc import Awaitable, Callable, MutableMapping
from pathlib import Path
import ssl
import time
from typing import TYPE_CHECKING, ClassVar, Literal
import urllib.parse
import httpx
from httpx import DigestAuth
from pydantic import BaseModel, ConfigDict, Field, FilePath, PrivateAttr
from typing_extensions import deprecated
if TYPE_CHECKING:
import grpc # type: ignore[import-untyped]
class TLSConfig(BaseModel):
    """TLS/mTLS configuration for secure client connections.

    Supports mutual TLS (mTLS) where the client presents a certificate to the server,
    and standard TLS with custom CA verification.

    Attributes:
        client_cert_path: Path to client certificate file (PEM format) for mTLS.
        client_key_path: Path to client private key file (PEM format) for mTLS.
        ca_cert_path: Path to CA certificate bundle for server verification.
        verify: Whether to verify server certificates. Set False only for development.
    """

    # Reject unknown fields so configuration typos fail loudly.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    client_cert_path: FilePath | None = Field(
        default=None,
        description="Path to client certificate file (PEM format) for mTLS",
    )
    client_key_path: FilePath | None = Field(
        default=None,
        description="Path to client private key file (PEM format) for mTLS",
    )
    ca_cert_path: FilePath | None = Field(
        default=None,
        description="Path to CA certificate bundle for server verification",
    )
    verify: bool = Field(
        default=True,
        description="Whether to verify server certificates. Set False only for development.",
    )

    def get_httpx_ssl_context(self) -> ssl.SSLContext | bool | str:
        """Build SSL context for httpx client.

        Returns:
            SSL context if certificates configured, True for default verification,
            False if verification disabled, or path to CA bundle.
        """
        # NOTE(review): verify=False short-circuits before client certs are
        # loaded, so mTLS with verification disabled is not possible via this
        # path — confirm that is intended.
        if not self.verify:
            return False
        if self.client_cert_path and self.client_key_path:
            context = ssl.create_default_context()
            if self.ca_cert_path:
                context.load_verify_locations(cafile=str(self.ca_cert_path))
            context.load_cert_chain(
                certfile=str(self.client_cert_path),
                keyfile=str(self.client_key_path),
            )
            return context
        # httpx accepts a CA-bundle path directly as its ``verify`` argument.
        if self.ca_cert_path:
            return str(self.ca_cert_path)
        return True

    def get_grpc_credentials(self) -> grpc.ChannelCredentials | None:  # type: ignore[no-any-unimported]
        """Build gRPC channel credentials for secure connections.

        Returns:
            gRPC SSL credentials if certificates configured, None otherwise.
        """
        # grpc is an optional dependency; silently degrade when absent.
        try:
            import grpc
        except ImportError:
            return None
        if not self.verify and not self.client_cert_path:
            return None
        root_certs: bytes | None = None
        private_key: bytes | None = None
        certificate_chain: bytes | None = None
        if self.ca_cert_path:
            root_certs = Path(self.ca_cert_path).read_bytes()
        if self.client_cert_path and self.client_key_path:
            private_key = Path(self.client_key_path).read_bytes()
            certificate_chain = Path(self.client_cert_path).read_bytes()
        return grpc.ssl_channel_credentials(
            root_certificates=root_certs,
            private_key=private_key,
            certificate_chain=certificate_chain,
        )
class ClientAuthScheme(ABC, BaseModel):
    """Base class for client-side authentication schemes.

    Client auth schemes apply credentials to outgoing requests.

    Attributes:
        tls: Optional TLS/mTLS configuration for secure connections.
    """

    # Shared across all client schemes; consumed by transport setup, not here.
    tls: TLSConfig | None = Field(
        default=None,
        description="TLS/mTLS configuration for secure connections",
    )

    @abstractmethod
    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Apply authentication to request headers.

        Args:
            client: HTTP client for making auth requests.
            headers: Current request headers.

        Returns:
            Updated headers with authentication applied.
        """
        ...
@deprecated("Use ClientAuthScheme instead", category=FutureWarning)
class AuthScheme(ClientAuthScheme):
"""Deprecated: Use ClientAuthScheme instead."""
class BearerTokenAuth(ClientAuthScheme):
    """Bearer token authentication (Authorization: Bearer <token>).

    Attributes:
        token: Bearer token for authentication.
    """

    token: str = Field(description="Bearer token")

    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Set the Authorization header to ``Bearer <token>``.

        Args:
            client: HTTP client for making auth requests (unused here).
            headers: Current request headers.

        Returns:
            The same mapping, with the Bearer token applied.
        """
        headers.update({"Authorization": f"Bearer {self.token}"})
        return headers
class HTTPBasicAuth(ClientAuthScheme):
    """HTTP Basic authentication.

    Attributes:
        username: Username for Basic authentication.
        password: Password for Basic authentication.
    """

    username: str = Field(description="Username")
    password: str = Field(description="Password")

    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Set the Authorization header to a base64-encoded Basic credential.

        Args:
            client: HTTP client for making auth requests (unused here).
            headers: Current request headers.

        Returns:
            The same mapping, with Basic auth applied.
        """
        raw = f"{self.username}:{self.password}".encode()
        headers["Authorization"] = "Basic " + base64.b64encode(raw).decode()
        return headers
class HTTPDigestAuth(ClientAuthScheme):
    """HTTP Digest authentication.

    Note: Delegates the digest handshake to httpx's built-in ``DigestAuth`` flow.

    Attributes:
        username: Username for Digest authentication.
        password: Password for Digest authentication.
    """

    username: str = Field(description="Username")
    password: str = Field(description="Password")

    # id() of the client already configured with DigestAuth (idempotency guard).
    _configured_client_id: int | None = PrivateAttr(default=None)

    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Digest auth is handled by httpx auth flow, not headers.

        Args:
            client: HTTP client for making auth requests.
            headers: Current request headers.

        Returns:
            Unchanged headers (Digest auth handled by httpx auth flow).
        """
        return headers

    def configure_client(self, client: httpx.AsyncClient) -> None:
        """Configure client with Digest auth.

        Idempotent: Only configures the client once. Subsequent calls on the same
        client instance are no-ops to prevent overwriting auth configuration.

        Args:
            client: HTTP client to configure with Digest authentication.
        """
        client_id = id(client)
        if self._configured_client_id == client_id:
            return
        client.auth = DigestAuth(self.username, self.password)
        self._configured_client_id = client_id
class APIKeyAuth(ClientAuthScheme):
    """API Key authentication (header, query, or cookie).

    Attributes:
        api_key: API key value for authentication.
        location: Where to send the API key (header, query, or cookie).
        name: Parameter name for the API key (default: X-API-Key).
    """

    api_key: str = Field(description="API key value")
    location: Literal["header", "query", "cookie"] = Field(
        default="header", description="Where to send the API key"
    )
    name: str = Field(default="X-API-Key", description="Parameter name for the API key")

    # Tracks id() of clients already given the query-param hook, keeping
    # configure_client idempotent per client instance.
    _configured_client_ids: set[int] = PrivateAttr(default_factory=set)

    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Apply API key authentication.

        Args:
            client: HTTP client for making auth requests.
            headers: Current request headers.

        Returns:
            Updated headers with API key (for header/cookie locations).
        """
        # Query-param keys are injected by configure_client's request hook,
        # so only header and cookie locations touch the headers here.
        if self.location == "header":
            headers[self.name] = self.api_key
        elif self.location == "cookie":
            headers["Cookie"] = f"{self.name}={self.api_key}"
        return headers

    def configure_client(self, client: httpx.AsyncClient) -> None:
        """Configure client for query param API keys.

        Idempotent: Only adds the request hook once per client instance.
        Subsequent calls on the same client are no-ops to prevent hook accumulation.

        Args:
            client: HTTP client to configure with query param API key hook.
        """
        if self.location == "query":
            client_id = id(client)
            if client_id in self._configured_client_ids:
                return

            async def _add_api_key_param(request: httpx.Request) -> None:
                url = httpx.URL(request.url)
                request.url = url.copy_add_param(self.name, self.api_key)

            client.event_hooks["request"].append(_add_api_key_param)
            # NOTE(review): id() values can be recycled after a client is garbage
            # collected, so this guard could in principle skip a brand-new client
            # that reuses a stale id — confirm this is acceptable.
            self._configured_client_ids.add(client_id)
class OAuth2ClientCredentials(ClientAuthScheme):
    """OAuth2 Client Credentials flow authentication.

    Thread-safe implementation with asyncio.Lock to prevent concurrent token fetches
    when multiple requests share the same auth instance.

    Attributes:
        token_url: OAuth2 token endpoint URL.
        client_id: OAuth2 client identifier.
        client_secret: OAuth2 client secret.
        scopes: List of required OAuth2 scopes.
    """

    token_url: str = Field(description="OAuth2 token endpoint")
    client_id: str = Field(description="OAuth2 client ID")
    client_secret: str = Field(description="OAuth2 client secret")
    scopes: list[str] = Field(
        default_factory=list, description="Required OAuth2 scopes"
    )

    # Cached token state; refreshed lazily inside apply_auth.
    _access_token: str | None = PrivateAttr(default=None)
    _token_expires_at: float | None = PrivateAttr(default=None)
    _lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)

    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Apply OAuth2 access token to Authorization header.

        Uses asyncio.Lock to ensure only one coroutine fetches tokens at a time,
        preventing race conditions when multiple concurrent requests use the same
        auth instance.

        Args:
            client: HTTP client for making token requests.
            headers: Current request headers.

        Returns:
            Updated headers with OAuth2 access token in Authorization header.
        """
        # Double-checked expiry test: the cheap check outside the lock avoids
        # serializing every request; the identical re-check inside the lock
        # ensures only one coroutine actually refreshes the token.
        if (
            self._access_token is None
            or self._token_expires_at is None
            or time.time() >= self._token_expires_at
        ):
            async with self._lock:
                if (
                    self._access_token is None
                    or self._token_expires_at is None
                    or time.time() >= self._token_expires_at
                ):
                    await self._fetch_token(client)
        if self._access_token:
            headers["Authorization"] = f"Bearer {self._access_token}"
        return headers

    async def _fetch_token(self, client: httpx.AsyncClient) -> None:
        """Fetch OAuth2 access token using client credentials flow.

        Args:
            client: HTTP client for making token request.

        Raises:
            httpx.HTTPStatusError: If token request fails.
        """
        data = {
            "grant_type": "client_credentials",
            "client_id": self.client_id,
            "client_secret": self.client_secret,
        }
        if self.scopes:
            data["scope"] = " ".join(self.scopes)
        response = await client.post(self.token_url, data=data)
        response.raise_for_status()
        token_data = response.json()
        self._access_token = token_data["access_token"]
        expires_in = token_data.get("expires_in", 3600)
        # Treat the token as expired 60s early to absorb clock skew/latency.
        self._token_expires_at = time.time() + expires_in - 60
class OAuth2AuthorizationCode(ClientAuthScheme):
    """OAuth2 Authorization Code flow authentication.

    Thread-safe implementation with asyncio.Lock to prevent concurrent token operations.

    Note: Requires interactive authorization.

    Attributes:
        authorization_url: OAuth2 authorization endpoint URL.
        token_url: OAuth2 token endpoint URL.
        client_id: OAuth2 client identifier.
        client_secret: OAuth2 client secret.
        redirect_uri: OAuth2 redirect URI for callback.
        scopes: List of required OAuth2 scopes.
    """

    authorization_url: str = Field(description="OAuth2 authorization endpoint")
    token_url: str = Field(description="OAuth2 token endpoint")
    client_id: str = Field(description="OAuth2 client ID")
    client_secret: str = Field(description="OAuth2 client secret")
    redirect_uri: str = Field(description="OAuth2 redirect URI")
    scopes: list[str] = Field(
        default_factory=list, description="Required OAuth2 scopes"
    )

    # Cached token state plus the user-supplied interactive callback.
    _access_token: str | None = PrivateAttr(default=None)
    _refresh_token: str | None = PrivateAttr(default=None)
    _token_expires_at: float | None = PrivateAttr(default=None)
    _authorization_callback: Callable[[str], Awaitable[str]] | None = PrivateAttr(
        default=None
    )
    _lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)

    def set_authorization_callback(
        self, callback: Callable[[str], Awaitable[str]] | None
    ) -> None:
        """Set callback to handle authorization URL.

        Args:
            callback: Async function that receives authorization URL and returns auth code.
        """
        self._authorization_callback = callback

    async def apply_auth(
        self, client: httpx.AsyncClient, headers: MutableMapping[str, str]
    ) -> MutableMapping[str, str]:
        """Apply OAuth2 access token to Authorization header.

        Uses asyncio.Lock to ensure only one coroutine handles token operations
        (initial fetch or refresh) at a time.

        Args:
            client: HTTP client for making token requests.
            headers: Current request headers.

        Returns:
            Updated headers with OAuth2 access token in Authorization header.

        Raises:
            ValueError: If authorization callback is not set.
        """
        # Double-checked under the lock in both branches: another coroutine may
        # have completed the fetch/refresh while this one awaited the lock.
        if self._access_token is None:
            if self._authorization_callback is None:
                msg = "Authorization callback not set. Use set_authorization_callback()"
                raise ValueError(msg)
            async with self._lock:
                if self._access_token is None:
                    await self._fetch_initial_token(client)
        elif self._token_expires_at and time.time() >= self._token_expires_at:
            async with self._lock:
                if self._token_expires_at and time.time() >= self._token_expires_at:
                    await self._refresh_access_token(client)
        if self._access_token:
            headers["Authorization"] = f"Bearer {self._access_token}"
        return headers

    async def _fetch_initial_token(self, client: httpx.AsyncClient) -> None:
        """Fetch initial access token using authorization code flow.

        Args:
            client: HTTP client for making token request.

        Raises:
            ValueError: If authorization callback is not set.
            httpx.HTTPStatusError: If token request fails.
        """
        params = {
            "response_type": "code",
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "scope": " ".join(self.scopes),
        }
        auth_url = f"{self.authorization_url}?{urllib.parse.urlencode(params)}"
        if self._authorization_callback is None:
            msg = "Authorization callback not set"
            raise ValueError(msg)
        # Hand the browser-facing URL to the caller and wait for the auth code.
        auth_code = await self._authorization_callback(auth_url)
        data = {
            "grant_type": "authorization_code",
            "code": auth_code,
            "client_id": self.client_id,
            "client_secret": self.client_secret,
            "redirect_uri": self.redirect_uri,
        }
        response = await client.post(self.token_url, data=data)
        response.raise_for_status()
        token_data = response.json()
        self._access_token = token_data["access_token"]
        self._refresh_token = token_data.get("refresh_token")
        expires_in = token_data.get("expires_in", 3600)
        # Treat the token as expired 60s early to absorb clock skew/latency.
        self._token_expires_at = time.time() + expires_in - 60

    async def _refresh_access_token(self, client: httpx.AsyncClient) -> None:
        """Refresh the access token using refresh token.

        Args:
            client: HTTP client for making token request.

        Raises:
            httpx.HTTPStatusError: If token refresh request fails.
        """
        # No refresh token issued by the server: fall back to a full re-auth.
        if not self._refresh_token:
            await self._fetch_initial_token(client)
            return
        data = {
            "grant_type": "refresh_token",
            "refresh_token": self._refresh_token,
            "client_id": self.client_id,
            "client_secret": self.client_secret,
        }
        response = await client.post(self.token_url, data=data)
        response.raise_for_status()
        token_data = response.json()
        self._access_token = token_data["access_token"]
        # Some providers rotate refresh tokens; keep the new one when present.
        if "refresh_token" in token_data:
            self._refresh_token = token_data["refresh_token"]
        expires_in = token_data.get("expires_in", 3600)
        self._token_expires_at = time.time() + expires_in - 60

View File

@@ -1,71 +0,0 @@
"""Deprecated: Authentication schemes for A2A protocol agents.
This module is deprecated. Import from crewai.a2a.auth instead:
- crewai.a2a.auth.ClientAuthScheme (replaces AuthScheme)
- crewai.a2a.auth.BearerTokenAuth
- crewai.a2a.auth.HTTPBasicAuth
- crewai.a2a.auth.HTTPDigestAuth
- crewai.a2a.auth.APIKeyAuth
- crewai.a2a.auth.OAuth2ClientCredentials
- crewai.a2a.auth.OAuth2AuthorizationCode
"""
from __future__ import annotations
from typing_extensions import deprecated
from crewai.a2a.auth.client_schemes import (
APIKeyAuth as _APIKeyAuth,
BearerTokenAuth as _BearerTokenAuth,
ClientAuthScheme as _ClientAuthScheme,
HTTPBasicAuth as _HTTPBasicAuth,
HTTPDigestAuth as _HTTPDigestAuth,
OAuth2AuthorizationCode as _OAuth2AuthorizationCode,
OAuth2ClientCredentials as _OAuth2ClientCredentials,
)
@deprecated("Use ClientAuthScheme from crewai.a2a.auth instead", category=FutureWarning)
class AuthScheme(_ClientAuthScheme):
"""Deprecated: Use ClientAuthScheme from crewai.a2a.auth instead."""
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class BearerTokenAuth(_BearerTokenAuth):
"""Deprecated: Import from crewai.a2a.auth instead."""
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class HTTPBasicAuth(_HTTPBasicAuth):
"""Deprecated: Import from crewai.a2a.auth instead."""
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class HTTPDigestAuth(_HTTPDigestAuth):
"""Deprecated: Import from crewai.a2a.auth instead."""
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class APIKeyAuth(_APIKeyAuth):
"""Deprecated: Import from crewai.a2a.auth instead."""
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class OAuth2ClientCredentials(_OAuth2ClientCredentials):
"""Deprecated: Import from crewai.a2a.auth instead."""
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class OAuth2AuthorizationCode(_OAuth2AuthorizationCode):
"""Deprecated: Import from crewai.a2a.auth instead."""
__all__ = [
"APIKeyAuth",
"AuthScheme",
"BearerTokenAuth",
"HTTPBasicAuth",
"HTTPDigestAuth",
"OAuth2AuthorizationCode",
"OAuth2ClientCredentials",
]

View File

@@ -1,759 +0,0 @@
"""Server-side authentication schemes for A2A protocol.
These schemes validate incoming requests to A2A server endpoints.
Supported authentication methods:
- Simple token validation with static bearer tokens
- Enterprise token validation (via PlusAPI)
- OpenID Connect with JWT validation using JWKS
- OAuth2 with JWT validation or token introspection
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
import logging
import os
from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Literal
import httpx
import jwt
from jwt import PyJWKClient
from pydantic import (
BaseModel,
BeforeValidator,
ConfigDict,
Field,
HttpUrl,
PrivateAttr,
SecretStr,
model_validator,
)
from typing_extensions import Self
if TYPE_CHECKING:
from a2a.types import OAuth2SecurityScheme
from jwt.types import Options
logger = logging.getLogger(__name__)
# FastAPI is optional at runtime: prefer its HTTPException and status constants,
# otherwise fall back to a minimal local equivalent with the same attributes.
try:
    from fastapi import HTTPException, status as http_status

    HTTP_401_UNAUTHORIZED = http_status.HTTP_401_UNAUTHORIZED
    HTTP_500_INTERNAL_SERVER_ERROR = http_status.HTTP_500_INTERNAL_SERVER_ERROR
    HTTP_503_SERVICE_UNAVAILABLE = http_status.HTTP_503_SERVICE_UNAVAILABLE
except ImportError:

    class HTTPException(Exception):  # type: ignore[no-redef] # noqa: N818
        """Fallback HTTPException when FastAPI is not installed."""

        def __init__(
            self,
            status_code: int,
            detail: str | None = None,
            headers: dict[str, str] | None = None,
        ) -> None:
            # Mirror FastAPI's attribute names so downstream code is agnostic.
            self.status_code = status_code
            self.detail = detail
            self.headers = headers
            super().__init__(detail)

    HTTP_401_UNAUTHORIZED = 401
    HTTP_500_INTERNAL_SERVER_ERROR = 500
    HTTP_503_SERVICE_UNAVAILABLE = 503
def _coerce_secret_str(v: str | SecretStr | None) -> SecretStr | None:
"""Coerce string to SecretStr."""
if v is None or isinstance(v, SecretStr):
return v
return SecretStr(v)
CoercedSecretStr = Annotated[SecretStr, BeforeValidator(_coerce_secret_str)]
JWTAlgorithm = Literal[
"RS256",
"RS384",
"RS512",
"ES256",
"ES384",
"ES512",
"PS256",
"PS384",
"PS512",
]
@dataclass
class AuthenticatedUser:
    """Result of successful authentication.

    Attributes:
        token: The original token that was validated.
        scheme: Name of the authentication scheme used.
        claims: JWT claims from OIDC or OAuth2 authentication.
    """

    # Original bearer token exactly as received from the client.
    token: str
    # Identifier of the scheme that validated it (e.g. "simple_token", "oidc").
    scheme: str
    # Decoded JWT claims; None for schemes that do not produce claims.
    claims: dict[str, Any] | None = None
class ServerAuthScheme(ABC, BaseModel):
    """Base class for server-side authentication schemes.

    Each scheme validates incoming requests and returns an AuthenticatedUser
    on success, or raises HTTPException on failure.
    """

    # Reject unknown fields so configuration typos fail loudly.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    @abstractmethod
    async def authenticate(self, token: str) -> AuthenticatedUser:
        """Authenticate the provided token.

        Args:
            token: The bearer token to authenticate.

        Returns:
            AuthenticatedUser on successful authentication.

        Raises:
            HTTPException: If authentication fails.
        """
        ...
class SimpleTokenAuth(ServerAuthScheme):
    """Simple bearer token authentication.

    Validates tokens against a configured static token or AUTH_TOKEN env var.

    Attributes:
        token: Expected token value. Falls back to AUTH_TOKEN env var if not set.
    """

    token: CoercedSecretStr | None = Field(
        default=None,
        description="Expected token. Falls back to AUTH_TOKEN env var.",
    )

    def _get_expected_token(self) -> str | None:
        """Get the expected token value (configured field, else AUTH_TOKEN env)."""
        if self.token:
            return self.token.get_secret_value()
        return os.environ.get("AUTH_TOKEN")

    async def authenticate(self, token: str) -> AuthenticatedUser:
        """Authenticate using simple token comparison.

        Uses a constant-time comparison (hmac.compare_digest) so the check does
        not leak how much of the secret matched through response timing.

        Args:
            token: The bearer token to authenticate.

        Returns:
            AuthenticatedUser on successful authentication.

        Raises:
            HTTPException: If authentication fails.
        """
        # Local import keeps this fix self-contained within the method.
        import hmac

        expected = self._get_expected_token()
        if expected is None:
            logger.warning(
                "Simple token authentication failed",
                extra={"reason": "no_token_configured"},
            )
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Authentication not configured",
            )
        # Fix: plain `token != expected` is a short-circuiting comparison and is
        # vulnerable to timing attacks; compare_digest is constant-time. Bytes
        # operands avoid compare_digest's ASCII-only restriction on str inputs.
        if not hmac.compare_digest(token.encode(), expected.encode()):
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Invalid or missing authentication credentials",
            )
        return AuthenticatedUser(
            token=token,
            scheme="simple_token",
        )
class EnterpriseTokenAuth(ServerAuthScheme):
    """Enterprise token authentication.

    Validates tokens via the PlusAPI enterprise verification endpoint.
    """

    async def authenticate(self, token: str) -> AuthenticatedUser:
        """Authenticate using enterprise token verification.

        Args:
            token: The bearer token to authenticate.

        Raises:
            NotImplementedError: Always — the PlusAPI verification call is not
                implemented yet; this scheme is a placeholder.
        """
        raise NotImplementedError
class OIDCAuth(ServerAuthScheme):
    """OpenID Connect authentication.

    Validates JWTs using JWKS with caching support via PyJWT.

    Attributes:
        issuer: The OpenID Connect issuer URL.
        audience: The expected audience claim.
        jwks_url: Optional explicit JWKS URL. Derived from issuer if not set.
        algorithms: List of allowed signing algorithms.
        required_claims: List of claims that must be present in the token.
        jwks_cache_ttl: TTL for JWKS cache in seconds.
        clock_skew_seconds: Allowed clock skew for token validation.
    """

    issuer: HttpUrl = Field(
        description="OpenID Connect issuer URL (e.g., https://auth.example.com)"
    )
    audience: str = Field(description="Expected audience claim (e.g., api://my-agent)")
    jwks_url: HttpUrl | None = Field(
        default=None,
        description="Explicit JWKS URL. Derived from issuer if not set.",
    )
    algorithms: list[str] = Field(
        default_factory=lambda: ["RS256"],
        description="List of allowed signing algorithms (RS256, ES256, etc.)",
    )
    required_claims: list[str] = Field(
        default_factory=lambda: ["exp", "iat", "iss", "aud", "sub"],
        description="List of claims that must be present in the token",
    )
    jwks_cache_ttl: int = Field(
        default=3600,
        description="TTL for JWKS cache in seconds",
        ge=60,
    )
    clock_skew_seconds: float = Field(
        default=30.0,
        description="Allowed clock skew for token validation",
        ge=0.0,
    )

    # PyJWKClient handles JWKS fetching and key caching; built post-validation.
    _jwk_client: PyJWKClient | None = PrivateAttr(default=None)

    @model_validator(mode="after")
    def _init_jwk_client(self) -> Self:
        """Initialize the JWK client after model creation."""
        # NOTE(review): when jwks_url is not given, the JWKS location is assumed
        # to be <issuer>/.well-known/jwks.json rather than discovered via the
        # issuer's /.well-known/openid-configuration document — confirm this
        # matches the identity providers in use.
        jwks_url = (
            str(self.jwks_url)
            if self.jwks_url
            else f"{str(self.issuer).rstrip('/')}/.well-known/jwks.json"
        )
        self._jwk_client = PyJWKClient(jwks_url, lifespan=self.jwks_cache_ttl)
        return self

    async def authenticate(self, token: str) -> AuthenticatedUser:
        """Authenticate using OIDC JWT validation.

        Args:
            token: The JWT to authenticate.

        Returns:
            AuthenticatedUser on successful authentication.

        Raises:
            HTTPException: If authentication fails.
        """
        if self._jwk_client is None:
            raise HTTPException(
                status_code=HTTP_500_INTERNAL_SERVER_ERROR,
                detail="OIDC not initialized",
            )
        # Each specific PyJWT error maps to a distinct 401 detail below;
        # JWKS transport failures map to 503 instead (server-side issue).
        try:
            signing_key = self._jwk_client.get_signing_key_from_jwt(token)
            claims = jwt.decode(
                token,
                signing_key.key,
                algorithms=self.algorithms,
                audience=self.audience,
                issuer=str(self.issuer).rstrip("/"),
                leeway=self.clock_skew_seconds,
                options={
                    "require": self.required_claims,
                },
            )
            return AuthenticatedUser(
                token=token,
                scheme="oidc",
                claims=claims,
            )
        except jwt.ExpiredSignatureError:
            logger.debug(
                "OIDC authentication failed",
                extra={"reason": "token_expired", "scheme": "oidc"},
            )
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Token has expired",
            ) from None
        except jwt.InvalidAudienceError:
            logger.debug(
                "OIDC authentication failed",
                extra={"reason": "invalid_audience", "scheme": "oidc"},
            )
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Invalid token audience",
            ) from None
        except jwt.InvalidIssuerError:
            logger.debug(
                "OIDC authentication failed",
                extra={"reason": "invalid_issuer", "scheme": "oidc"},
            )
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Invalid token issuer",
            ) from None
        except jwt.MissingRequiredClaimError as e:
            logger.debug(
                "OIDC authentication failed",
                extra={"reason": "missing_claim", "claim": e.claim, "scheme": "oidc"},
            )
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail=f"Missing required claim: {e.claim}",
            ) from None
        except jwt.PyJWKClientError as e:
            logger.error(
                "OIDC authentication failed",
                extra={
                    "reason": "jwks_client_error",
                    "error": str(e),
                    "scheme": "oidc",
                },
            )
            raise HTTPException(
                status_code=HTTP_503_SERVICE_UNAVAILABLE,
                detail="Unable to fetch signing keys",
            ) from None
        except jwt.InvalidTokenError as e:
            logger.debug(
                "OIDC authentication failed",
                extra={"reason": "invalid_token", "error": str(e), "scheme": "oidc"},
            )
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Invalid or missing authentication credentials",
            ) from None
class OAuth2ServerAuth(ServerAuthScheme):
"""OAuth2 authentication for A2A server.
Declares OAuth2 security scheme in AgentCard and validates tokens using
either JWKS for JWT tokens or token introspection for opaque tokens.
This is distinct from OIDCAuth in that it declares an explicit OAuth2SecurityScheme
with flows, rather than an OpenIdConnectSecurityScheme with discovery URL.
Attributes:
token_url: OAuth2 token endpoint URL for client_credentials flow.
authorization_url: OAuth2 authorization endpoint for authorization_code flow.
refresh_url: Optional refresh token endpoint URL.
scopes: Available OAuth2 scopes with descriptions.
jwks_url: JWKS URL for JWT validation. Required if not using introspection.
introspection_url: Token introspection endpoint (RFC 7662). Alternative to JWKS.
introspection_client_id: Client ID for introspection endpoint authentication.
introspection_client_secret: Client secret for introspection endpoint.
audience: Expected audience claim for JWT validation.
issuer: Expected issuer claim for JWT validation.
algorithms: Allowed JWT signing algorithms.
required_claims: Claims that must be present in the token.
jwks_cache_ttl: TTL for JWKS cache in seconds.
clock_skew_seconds: Allowed clock skew for token validation.
"""
token_url: HttpUrl = Field(
description="OAuth2 token endpoint URL",
)
authorization_url: HttpUrl | None = Field(
default=None,
description="OAuth2 authorization endpoint URL for authorization_code flow",
)
refresh_url: HttpUrl | None = Field(
default=None,
description="OAuth2 refresh token endpoint URL",
)
scopes: dict[str, str] = Field(
default_factory=dict,
description="Available OAuth2 scopes with descriptions",
)
jwks_url: HttpUrl | None = Field(
default=None,
description="JWKS URL for JWT validation. Required if not using introspection.",
)
introspection_url: HttpUrl | None = Field(
default=None,
description="Token introspection endpoint (RFC 7662). Alternative to JWKS.",
)
introspection_client_id: str | None = Field(
default=None,
description="Client ID for introspection endpoint authentication",
)
introspection_client_secret: CoercedSecretStr | None = Field(
default=None,
description="Client secret for introspection endpoint authentication",
)
audience: str | None = Field(
default=None,
description="Expected audience claim for JWT validation",
)
issuer: str | None = Field(
default=None,
description="Expected issuer claim for JWT validation",
)
algorithms: list[str] = Field(
default_factory=lambda: ["RS256"],
description="Allowed JWT signing algorithms",
)
required_claims: list[str] = Field(
default_factory=lambda: ["exp", "iat"],
description="Claims that must be present in the token",
)
jwks_cache_ttl: int = Field(
default=3600,
description="TTL for JWKS cache in seconds",
ge=60,
)
clock_skew_seconds: float = Field(
default=30.0,
description="Allowed clock skew for token validation",
ge=0.0,
)
_jwk_client: PyJWKClient | None = PrivateAttr(default=None)
@model_validator(mode="after")
def _validate_and_init(self) -> Self:
    """Check that a token-validation backend is configured and set up JWKS.

    Requires at least one of jwks_url/introspection_url, requires client
    credentials when introspection is used, and eagerly constructs the
    PyJWKClient when a JWKS URL is present.
    """
    has_jwks = self.jwks_url is not None
    has_introspection = self.introspection_url is not None

    if not (has_jwks or has_introspection):
        raise ValueError(
            "Either jwks_url or introspection_url must be provided for token validation"
        )

    if has_introspection and not (
        self.introspection_client_id and self.introspection_client_secret
    ):
        raise ValueError(
            "introspection_client_id and introspection_client_secret are required "
            "when using token introspection"
        )

    if has_jwks:
        self._jwk_client = PyJWKClient(
            str(self.jwks_url), lifespan=self.jwks_cache_ttl
        )
    return self
async def authenticate(self, token: str) -> AuthenticatedUser:
    """Validate an OAuth2 access token.

    Prefers local JWT verification via JWKS when a JWKS client was
    initialized; otherwise delegates to the remote introspection endpoint.

    Args:
        token: The OAuth2 access token to authenticate.

    Returns:
        AuthenticatedUser on successful authentication.

    Raises:
        HTTPException: If authentication fails.
    """
    validator = (
        self._authenticate_jwt
        if self._jwk_client
        else self._authenticate_introspection
    )
    return await validator(token)
async def _authenticate_jwt(self, token: str) -> AuthenticatedUser:
    """Authenticate using JWKS JWT validation.

    Resolves the token's signing key from the configured JWKS endpoint and
    verifies signature, expiry, audience, issuer, and required claims.
    PyJWT failures are translated into HTTP errors: validation problems
    become 401s, JWKS fetch problems become a 503.
    """
    if self._jwk_client is None:
        # Defensive: authenticate() only routes here when the client exists.
        raise HTTPException(
            status_code=HTTP_500_INTERNAL_SERVER_ERROR,
            detail="OAuth2 JWKS not initialized",
        )
    try:
        # The key is selected via the token's "kid" header; PyJWKClient
        # caches fetched keys for jwks_cache_ttl seconds.
        signing_key = self._jwk_client.get_signing_key_from_jwt(token)
        decode_options: Options = {
            "require": self.required_claims,
        }
        claims = jwt.decode(
            token,
            signing_key.key,
            algorithms=self.algorithms,
            audience=self.audience,
            issuer=self.issuer,
            leeway=self.clock_skew_seconds,
            options=decode_options,
        )
        return AuthenticatedUser(
            token=token,
            scheme="oauth2",
            claims=claims,
        )
    # Specific PyJWT errors are handled before the InvalidTokenError base
    # class below, so each failure mode gets a precise detail message.
    # `from None` suppresses internal exception context in client responses.
    except jwt.ExpiredSignatureError:
        logger.debug(
            "OAuth2 authentication failed",
            extra={"reason": "token_expired", "scheme": "oauth2"},
        )
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail="Token has expired",
        ) from None
    except jwt.InvalidAudienceError:
        logger.debug(
            "OAuth2 authentication failed",
            extra={"reason": "invalid_audience", "scheme": "oauth2"},
        )
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail="Invalid token audience",
        ) from None
    except jwt.InvalidIssuerError:
        logger.debug(
            "OAuth2 authentication failed",
            extra={"reason": "invalid_issuer", "scheme": "oauth2"},
        )
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail="Invalid token issuer",
        ) from None
    except jwt.MissingRequiredClaimError as e:
        logger.debug(
            "OAuth2 authentication failed",
            extra={"reason": "missing_claim", "claim": e.claim, "scheme": "oauth2"},
        )
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail=f"Missing required claim: {e.claim}",
        ) from None
    except jwt.PyJWKClientError as e:
        # Infrastructure failure (JWKS unreachable/malformed), not a bad
        # token — reported as 503 and logged at error level.
        logger.error(
            "OAuth2 authentication failed",
            extra={
                "reason": "jwks_client_error",
                "error": str(e),
                "scheme": "oauth2",
            },
        )
        raise HTTPException(
            status_code=HTTP_503_SERVICE_UNAVAILABLE,
            detail="Unable to fetch signing keys",
        ) from None
    except jwt.InvalidTokenError as e:
        # Catch-all for remaining validation failures (bad signature, etc.).
        logger.debug(
            "OAuth2 authentication failed",
            extra={"reason": "invalid_token", "error": str(e), "scheme": "oauth2"},
        )
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail="Invalid or missing authentication credentials",
        ) from None
async def _authenticate_introspection(self, token: str) -> AuthenticatedUser:
    """Authenticate using OAuth2 token introspection (RFC 7662).

    POSTs the token to the configured introspection endpoint using HTTP
    Basic auth with the introspection client credentials, then accepts the
    token only if the response marks it as active.
    """
    if not self.introspection_url:
        # Defensive: the model validator guarantees a backend is configured.
        raise HTTPException(
            status_code=HTTP_500_INTERNAL_SERVER_ERROR,
            detail="OAuth2 introspection not configured",
        )
    try:
        # NOTE(review): no explicit timeout is configured on this client; a
        # slow introspection endpoint can stall the request — consider adding one.
        async with httpx.AsyncClient() as client:
            response = await client.post(
                str(self.introspection_url),
                data={"token": token},
                # (username, password) tuple = HTTP Basic auth per RFC 7662 §2.1.
                auth=(
                    self.introspection_client_id or "",
                    self.introspection_client_secret.get_secret_value()
                    if self.introspection_client_secret
                    else "",
                ),
            )
            response.raise_for_status()
            introspection_result = response.json()
    except httpx.HTTPStatusError as e:
        logger.error(
            "OAuth2 introspection failed",
            extra={"reason": "http_error", "status_code": e.response.status_code},
        )
        raise HTTPException(
            status_code=HTTP_503_SERVICE_UNAVAILABLE,
            detail="Token introspection service unavailable",
        ) from None
    except Exception as e:
        # Broad catch deliberately maps any transport/JSON failure to a 503
        # instead of leaking an unhandled 500.
        logger.error(
            "OAuth2 introspection failed",
            extra={"reason": "unexpected_error", "error": str(e)},
        )
        raise HTTPException(
            status_code=HTTP_503_SERVICE_UNAVAILABLE,
            detail="Token introspection failed",
        ) from None
    # Per RFC 7662, "active" is the authoritative validity flag; absent
    # means inactive.
    if not introspection_result.get("active", False):
        logger.debug(
            "OAuth2 authentication failed",
            extra={"reason": "token_not_active", "scheme": "oauth2"},
        )
        raise HTTPException(
            status_code=HTTP_401_UNAUTHORIZED,
            detail="Token is not active",
        )
    # The full introspection response is surfaced as the user's claims.
    return AuthenticatedUser(
        token=token,
        scheme="oauth2",
        claims=introspection_result,
    )
def to_security_scheme(self) -> OAuth2SecurityScheme:
    """Build the OAuth2SecurityScheme advertised in the AgentCard.

    Declares a client_credentials flow when token_url is configured and an
    authorization_code flow when authorization_url is configured.

    Returns:
        OAuth2SecurityScheme suitable for use in AgentCard security_schemes.
    """
    # Imported lazily: a2a-sdk is an optional dependency.
    from a2a.types import (
        AuthorizationCodeOAuthFlow,
        ClientCredentialsOAuthFlow,
        OAuth2SecurityScheme,
        OAuthFlows,
    )

    refresh = str(self.refresh_url) if self.refresh_url else None

    flows = OAuthFlows(
        client_credentials=(
            ClientCredentialsOAuthFlow(
                token_url=str(self.token_url),
                refresh_url=refresh,
                scopes=self.scopes,
            )
            if self.token_url
            else None
        ),
        authorization_code=(
            AuthorizationCodeOAuthFlow(
                authorization_url=str(self.authorization_url),
                token_url=str(self.token_url),
                refresh_url=refresh,
                scopes=self.scopes,
            )
            if self.authorization_url
            else None
        ),
    )
    return OAuth2SecurityScheme(
        flows=flows,
        description="OAuth2 authentication",
    )
class APIKeyServerAuth(ServerAuthScheme):
    """API Key authentication for A2A server.

    Validates requests using an API key in a header, query parameter, or cookie.

    Attributes:
        name: The name of the API key parameter (default: X-API-Key).
        location: Where to look for the API key (header, query, or cookie).
        api_key: The expected API key value.
    """

    name: str = Field(
        default="X-API-Key",
        description="Name of the API key parameter",
    )
    location: Literal["header", "query", "cookie"] = Field(
        default="header",
        description="Where to look for the API key",
    )
    api_key: CoercedSecretStr = Field(
        description="Expected API key value",
    )

    async def authenticate(self, token: str) -> AuthenticatedUser:
        """Authenticate using constant-time API key comparison.

        Args:
            token: The API key to authenticate.

        Returns:
            AuthenticatedUser on successful authentication.

        Raises:
            HTTPException: If authentication fails.
        """
        import hmac

        # Use a constant-time comparison instead of `!=`: a short-circuiting
        # string compare leaks how many leading bytes matched via response
        # latency, enabling a timing attack on the key.
        expected = self.api_key.get_secret_value()
        if not hmac.compare_digest(token.encode("utf-8"), expected.encode("utf-8")):
            raise HTTPException(
                status_code=HTTP_401_UNAUTHORIZED,
                detail="Invalid API key",
            )
        return AuthenticatedUser(
            token=token,
            scheme="api_key",
        )
class MTLSServerAuth(ServerAuthScheme):
    """Mutual TLS authentication marker for AgentCard declaration.

    This scheme is primarily for AgentCard security_schemes declaration.
    Actual mTLS verification happens at the TLS/transport layer, not
    at the application layer via token validation.

    When configured, this signals to clients that the server requires
    client certificates for authentication.
    """

    description: str = Field(
        default="Mutual TLS certificate authentication",
        description="Description for the security scheme",
    )

    async def authenticate(self, token: str) -> AuthenticatedUser:
        """Return an authenticated user for an mTLS-verified connection.

        By the time this runs, the TLS handshake (including the client
        certificate check) has already succeeded at the transport layer,
        so no application-level validation is performed here.

        Args:
            token: Certificate subject or identifier (from TLS layer).

        Returns:
            AuthenticatedUser indicating mTLS authentication.
        """
        if token:
            subject = token
        else:
            subject = "mtls-verified"
        return AuthenticatedUser(token=subject, scheme="mtls")

View File

@@ -1,273 +0,0 @@
"""Authentication utilities for A2A protocol agent communication.
Provides validation and retry logic for various authentication schemes including
OAuth2, API keys, and HTTP authentication methods.
"""
import asyncio
from collections.abc import Awaitable, Callable, MutableMapping
import hashlib
import re
import threading
from typing import Final, Literal, cast
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
APIKeySecurityScheme,
AgentCard,
HTTPAuthSecurityScheme,
OAuth2SecurityScheme,
)
from httpx import AsyncClient, Response
from crewai.a2a.auth.client_schemes import (
APIKeyAuth,
BearerTokenAuth,
ClientAuthScheme,
HTTPBasicAuth,
HTTPDigestAuth,
OAuth2AuthorizationCode,
OAuth2ClientCredentials,
)
class _AuthStore:
    """Thread-safe registry mapping content-hash keys to client auth schemes."""

    def __init__(self) -> None:
        self._lock = threading.RLock()
        self._store: dict[str, ClientAuthScheme | None] = {}

    @staticmethod
    def compute_key(auth_type: str, auth_data: str) -> str:
        """Derive a collision-resistant SHA-256 hex key from type and data."""
        digest = hashlib.sha256(f"{auth_type}:{auth_data}".encode())
        return digest.hexdigest()

    def set(self, key: str, auth: ClientAuthScheme | None) -> None:
        """Store an auth scheme under *key*."""
        self[key] = auth

    def get(self, key: str) -> ClientAuthScheme | None:
        """Return the auth scheme for *key*, or None when absent."""
        with self._lock:
            return self._store.get(key)

    def __setitem__(self, key: str, value: ClientAuthScheme | None) -> None:
        with self._lock:
            self._store[key] = value

    def __getitem__(self, key: str) -> ClientAuthScheme | None:
        with self._lock:
            return self._store[key]
# Module-level singleton shared by the A2A client auth machinery.
_auth_store = _AuthStore()

# Splits a WWW-Authenticate header into "<scheme> <params>" challenge chunks.
_SCHEME_PATTERN: Final[re.Pattern[str]] = re.compile(r"(\w+)\s+(.+?)(?=,\s*\w+\s+|$)")
# Extracts key=value parameters (quoted or bare) from one challenge.
_PARAM_PATTERN: Final[re.Pattern[str]] = re.compile(r'(\w+)=(?:"([^"]*)"|([^\s,]+))')

# AgentCard security-scheme type -> client auth classes that can satisfy it.
_SCHEME_AUTH_MAPPING: Final[dict[type, tuple[type[ClientAuthScheme], ...]]] = {
    OAuth2SecurityScheme: (
        OAuth2ClientCredentials,
        OAuth2AuthorizationCode,
        BearerTokenAuth,
    ),
    APIKeySecurityScheme: (APIKeyAuth,),
}

_HTTPSchemeType = Literal["basic", "digest", "bearer"]

# Lowercase HTTP auth scheme name -> required client auth class.
_HTTP_SCHEME_MAPPING: Final[dict[_HTTPSchemeType, type[ClientAuthScheme]]] = {
    "basic": HTTPBasicAuth,
    "digest": HTTPDigestAuth,
    "bearer": BearerTokenAuth,
}
def _raise_auth_mismatch(
    expected_classes: type[ClientAuthScheme] | tuple[type[ClientAuthScheme], ...],
    provided_auth: ClientAuthScheme,
) -> None:
    """Raise a 401 describing which auth scheme(s) the AgentCard requires.

    Args:
        expected_classes: Expected authentication class or tuple of classes.
        provided_auth: Actually provided authentication instance.

    Raises:
        A2AClientHTTPError: Always raises with 401 status code.
    """
    if not isinstance(expected_classes, tuple):
        required = expected_classes.__name__
    elif len(expected_classes) == 1:
        required = expected_classes[0].__name__
    else:
        joined = ", ".join(cls.__name__ for cls in expected_classes)
        required = f"one of ({joined})"
    raise A2AClientHTTPError(
        401,
        f"AgentCard requires {required} authentication, "
        f"but {type(provided_auth).__name__} was provided",
    )
def parse_www_authenticate(header_value: str) -> dict[str, dict[str, str]]:
    """Parse a WWW-Authenticate header into per-scheme challenge parameters.

    Args:
        header_value: The WWW-Authenticate header value.

    Returns:
        Dictionary mapping auth scheme to its parameters.
        Example: {"Bearer": {"realm": "api", "scope": "read write"}}
    """
    if not header_value:
        return {}
    scheme_re = re.compile(r"(\w+)\s+(.+?)(?=,\s*\w+\s+|$)")
    param_re = re.compile(r'(\w+)=(?:"([^"]*)"|([^\s,]+))')
    # Quoted values land in group 2, bare values in group 3.
    return {
        challenge.group(1): {
            param.group(1): param.group(2) or param.group(3)
            for param in param_re.finditer(challenge.group(2))
        }
        for challenge in scheme_re.finditer(header_value)
    }
def validate_auth_against_agent_card(
    agent_card: AgentCard, auth: ClientAuthScheme | None
) -> None:
    """Check the provided auth scheme against the AgentCard's requirements.

    Only the first security requirement object on the card is considered;
    validation succeeds on the first declared scheme the provided auth
    satisfies.

    Args:
        agent_card: The A2A AgentCard containing security requirements.
        auth: User-provided authentication scheme (or None).

    Raises:
        A2AClientHTTPError: If auth doesn't match AgentCard requirements (status_code=401).
    """
    if not (agent_card.security and agent_card.security_schemes):
        # Card declares no security: nothing to validate.
        return
    if not auth:
        raise A2AClientHTTPError(
            401, "AgentCard requires authentication but no auth scheme provided"
        )

    first_security_req = agent_card.security[0] if agent_card.security else {}
    for scheme_name in first_security_req:
        wrapper = agent_card.security_schemes.get(scheme_name)
        if not wrapper:
            continue
        scheme = wrapper.root

        allowed_classes = _SCHEME_AUTH_MAPPING.get(type(scheme))
        if allowed_classes:
            if not isinstance(auth, allowed_classes):
                _raise_auth_mismatch(allowed_classes, auth)
            return

        if isinstance(scheme, HTTPAuthSecurityScheme):
            scheme_key = cast(_HTTPSchemeType, scheme.scheme.lower())
            required_class = _HTTP_SCHEME_MAPPING.get(scheme_key)
            if required_class:
                if not isinstance(auth, required_class):
                    _raise_auth_mismatch(required_class, auth)
                return

    raise A2AClientHTTPError(
        401, "Could not validate auth against AgentCard security requirements"
    )
async def retry_on_401(
    request_func: Callable[[], Awaitable[Response]],
    auth_scheme: ClientAuthScheme | None,
    client: AsyncClient,
    headers: MutableMapping[str, str],
    max_retries: int = 3,
) -> Response:
    """Retry a request on 401 authentication error.

    Handles 401 errors by:
    1. Parsing WWW-Authenticate header
    2. Re-acquiring credentials
    3. Retrying the request

    Args:
        request_func: Async function that makes the HTTP request.
        auth_scheme: Authentication scheme to refresh credentials with.
        client: HTTP client for making requests.
        headers: Request headers to update with new auth.
        max_retries: Maximum number of retry attempts (default: 3).

    Returns:
        HTTP response from the request.

    Raises:
        httpx.HTTPStatusError: If retries are exhausted or auth scheme is None.
    """
    last_response: Response | None = None
    last_challenges: dict[str, dict[str, str]] = {}
    for attempt in range(max_retries):
        response = await request_func()
        if response.status_code != 401:
            # Success or a non-auth error: return immediately without retrying.
            return response
        last_response = response
        if auth_scheme is None:
            # No way to refresh credentials; surface the 401 via raise_for_status.
            response.raise_for_status()
            return response
        www_authenticate = response.headers.get("WWW-Authenticate", "")
        challenges = parse_www_authenticate(www_authenticate)
        last_challenges = challenges
        if attempt >= max_retries - 1:
            # Final attempt already made: don't sleep or re-auth again.
            break
        # Exponential backoff: 1s, 2s, 4s, ...
        backoff_time = 2**attempt
        await asyncio.sleep(backoff_time)
        # Refresh credentials into the shared `headers` mapping; the next
        # request_func() call is expected to read from it.
        # NOTE(review): assumes request_func closes over `headers` — confirm
        # at call sites.
        await auth_scheme.apply_auth(client, headers)
    if last_response:
        # Exhausted retries on 401: raise_for_status raises HTTPStatusError.
        last_response.raise_for_status()
        return last_response
    # Reachable only when max_retries <= 0 (loop body never ran); in that
    # case last_challenges is always empty, so the challenge formatting
    # below is defensive.
    msg = "retry_on_401 failed without making any requests"
    if last_challenges:
        challenge_info = ", ".join(
            f"{scheme} (realm={params.get('realm', 'N/A')})"
            for scheme, params in last_challenges.items()
        )
        msg = f"{msg}. Server challenges: {challenge_info}"
    raise RuntimeError(msg)
def configure_auth_client(
    auth: HTTPDigestAuth | APIKeyAuth, client: AsyncClient
) -> None:
    """Configure HTTP client with auth-specific settings.

    Only HTTPDigestAuth and APIKeyAuth need client configuration; other
    schemes apply themselves per-request instead.

    Args:
        auth: Authentication scheme that requires client configuration.
        client: HTTP client to configure.
    """
    # Delegates entirely to the scheme; kept as a typed seam for callers.
    auth.configure_client(client)

View File

@@ -1,694 +0,0 @@
"""A2A configuration types.
This module is separate from experimental.a2a to avoid circular imports.
"""
from __future__ import annotations
from pathlib import Path
from typing import Any, ClassVar, Literal, cast
import warnings
from pydantic import (
BaseModel,
ConfigDict,
Field,
FilePath,
PrivateAttr,
SecretStr,
model_validator,
)
from typing_extensions import Self, deprecated
from crewai.a2a.auth.client_schemes import ClientAuthScheme
from crewai.a2a.auth.server_schemes import ServerAuthScheme
from crewai.a2a.extensions.base import ValidatedA2AExtension
from crewai.a2a.types import ProtocolVersion, TransportType, Url
try:
    from a2a.types import (
        AgentCapabilities,
        AgentCardSignature,
        AgentInterface,
        AgentProvider,
        AgentSkill,
        SecurityScheme,
    )

    from crewai.a2a.extensions.server import ServerExtension
    from crewai.a2a.updates import UpdateConfig
except ImportError:
    # a2a-sdk is an optional dependency: rebind the names to `Any` so this
    # module (and annotations referencing these types) stays importable.
    # Features that actually need the SDK fail only when used.
    UpdateConfig: Any = Any  # type: ignore[no-redef]
    AgentCapabilities: Any = Any  # type: ignore[no-redef]
    AgentCardSignature: Any = Any  # type: ignore[no-redef]
    AgentInterface: Any = Any  # type: ignore[no-redef]
    AgentProvider: Any = Any  # type: ignore[no-redef]
    SecurityScheme: Any = Any  # type: ignore[no-redef]
    AgentSkill: Any = Any  # type: ignore[no-redef]
    ServerExtension: Any = Any  # type: ignore[no-redef]
def _get_default_update_config() -> UpdateConfig:
    """Return the default update mechanism (streaming).

    Imported lazily so this module does not require the optional
    a2a-dependent modules at import time.
    """
    from crewai.a2a.updates import StreamingConfig

    return StreamingConfig()
# JWS signing algorithms accepted for AgentCard signatures: RSA PKCS#1 (RS*),
# ECDSA (ES*), and RSA-PSS (PS*), each at 256/384/512-bit hash sizes.
SigningAlgorithm = Literal[
    "RS256",
    "RS384",
    "RS512",
    "ES256",
    "ES384",
    "ES512",
    "PS256",
    "PS384",
    "PS512",
]
class AgentCardSigningConfig(BaseModel):
    """Configuration for AgentCard JWS signing.

    Provides the private key and algorithm settings for signing AgentCards.
    Either private_key_path or private_key_pem must be provided, but not both.

    Attributes:
        private_key_path: Path to a PEM-encoded private key file.
        private_key_pem: PEM-encoded private key as a secret string.
        key_id: Optional key identifier for the JWS header (kid claim).
        algorithm: Signing algorithm (RS256, ES256, PS256, etc.).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    private_key_path: FilePath | None = Field(
        default=None,
        description="Path to PEM-encoded private key file",
    )
    # SecretStr keeps the key material out of reprs and logs.
    private_key_pem: SecretStr | None = Field(
        default=None,
        description="PEM-encoded private key",
    )
    key_id: str | None = Field(
        default=None,
        description="Key identifier for JWS header (kid claim)",
    )
    algorithm: SigningAlgorithm = Field(
        default="RS256",
        description="Signing algorithm (RS256, ES256, PS256, etc.)",
    )

    @model_validator(mode="after")
    def _validate_key_source(self) -> Self:
        """Ensure exactly one key source is provided."""
        # Exactly-one (XOR) check over the two key sources.
        has_path = self.private_key_path is not None
        has_pem = self.private_key_pem is not None
        if not has_path and not has_pem:
            raise ValueError(
                "Either private_key_path or private_key_pem must be provided"
            )
        if has_path and has_pem:
            raise ValueError(
                "Only one of private_key_path or private_key_pem should be provided"
            )
        return self

    def get_private_key(self) -> str:
        """Get the private key content.

        Returns:
            The PEM-encoded private key as a string.

        Raises:
            ValueError: If no key source is configured (defensive; the
                model validator normally guarantees exactly one is set).
        """
        # Inline PEM takes precedence over a file path.
        if self.private_key_pem:
            return self.private_key_pem.get_secret_value()
        if self.private_key_path:
            return Path(self.private_key_path).read_text()
        raise ValueError("No private key configured")
class GRPCServerConfig(BaseModel):
    """gRPC server transport configuration.

    Presence of this config in ServerTransportConfig.grpc enables gRPC transport.

    Attributes:
        host: Hostname to advertise in agent cards (default: localhost).
            Use docker service name (e.g., 'web') for docker-compose setups.
        port: Port for the gRPC server.
        tls_cert_path: Path to TLS certificate file for gRPC.
        tls_key_path: Path to TLS private key file for gRPC.
        max_workers: Maximum number of workers for the gRPC thread pool.
        reflection_enabled: Whether to enable gRPC reflection for debugging.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    host: str = Field(
        default="localhost",
        description="Hostname to advertise in agent cards for gRPC connections",
    )
    # 50051 is the conventional default gRPC port.
    port: int = Field(
        default=50051,
        description="Port for the gRPC server",
    )
    # Optional TLS material; presumably plaintext gRPC when unset — confirm
    # in the server wiring.
    tls_cert_path: str | None = Field(
        default=None,
        description="Path to TLS certificate file for gRPC",
    )
    tls_key_path: str | None = Field(
        default=None,
        description="Path to TLS private key file for gRPC",
    )
    max_workers: int = Field(
        default=10,
        description="Maximum number of workers for the gRPC thread pool",
    )
    reflection_enabled: bool = Field(
        default=False,
        description="Whether to enable gRPC reflection for debugging",
    )
class GRPCClientConfig(BaseModel):
    """gRPC client transport configuration.

    All fields default to None, deferring to the gRPC library defaults.

    Attributes:
        max_send_message_length: Maximum size for outgoing messages in bytes.
        max_receive_message_length: Maximum size for incoming messages in bytes.
        keepalive_time_ms: Time between keepalive pings in milliseconds.
        keepalive_timeout_ms: Timeout for keepalive ping response in milliseconds.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    max_send_message_length: int | None = Field(
        default=None,
        description="Maximum size for outgoing messages in bytes",
    )
    max_receive_message_length: int | None = Field(
        default=None,
        description="Maximum size for incoming messages in bytes",
    )
    keepalive_time_ms: int | None = Field(
        default=None,
        description="Time between keepalive pings in milliseconds",
    )
    keepalive_timeout_ms: int | None = Field(
        default=None,
        description="Timeout for keepalive ping response in milliseconds",
    )
class JSONRPCServerConfig(BaseModel):
    """JSON-RPC server transport configuration.

    Presence of this config in ServerTransportConfig.jsonrpc enables JSON-RPC transport.

    Attributes:
        rpc_path: URL path for the JSON-RPC endpoint.
        agent_card_path: URL path for the agent card endpoint.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    rpc_path: str = Field(
        default="/a2a",
        description="URL path for the JSON-RPC endpoint",
    )
    # The well-known agent-card location defined by the A2A specification.
    agent_card_path: str = Field(
        default="/.well-known/agent-card.json",
        description="URL path for the agent card endpoint",
    )
class JSONRPCClientConfig(BaseModel):
    """JSON-RPC client transport configuration.

    Attributes:
        max_request_size: Maximum request body size in bytes.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    # None defers to the transport library's default limit.
    max_request_size: int | None = Field(
        default=None,
        description="Maximum request body size in bytes",
    )
class HTTPJSONConfig(BaseModel):
    """HTTP+JSON transport configuration.

    Presence of this config in ServerTransportConfig.http_json enables HTTP+JSON transport.
    """

    # Marker model with no options yet; extra="forbid" rejects unknown keys.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
class ServerPushNotificationConfig(BaseModel):
    """Configuration for outgoing webhook push notifications.

    Controls how the server signs and delivers push notifications to clients.

    Attributes:
        signature_secret: Shared secret for HMAC-SHA256 signing of outgoing webhooks.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    # When None, outgoing webhooks are presumably unsigned — confirm in the
    # push-notification sender.
    signature_secret: SecretStr | None = Field(
        default=None,
        description="Shared secret for HMAC-SHA256 signing of outgoing push notifications",
    )
class ServerTransportConfig(BaseModel):
    """Transport configuration for A2A server.

    Groups all transport-related settings including preferred transport
    and protocol-specific configurations.

    Attributes:
        preferred: Transport protocol for the preferred endpoint.
        jsonrpc: JSON-RPC server transport configuration.
        grpc: gRPC server transport configuration.
        http_json: HTTP+JSON transport configuration.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    preferred: TransportType = Field(
        default="JSONRPC",
        description="Transport protocol for the preferred endpoint",
    )
    # JSON-RPC is always configured (it is the default transport).
    jsonrpc: JSONRPCServerConfig = Field(
        default_factory=JSONRPCServerConfig,
        description="JSON-RPC server transport configuration",
    )
    # gRPC and HTTP+JSON are opt-in: presence of the config enables them.
    grpc: GRPCServerConfig | None = Field(
        default=None,
        description="gRPC server transport configuration",
    )
    http_json: HTTPJSONConfig | None = Field(
        default=None,
        description="HTTP+JSON transport configuration",
    )
def _migrate_client_transport_fields(
transport: ClientTransportConfig,
transport_protocol: TransportType | None,
supported_transports: list[TransportType] | None,
) -> None:
"""Migrate deprecated transport fields to new config."""
if transport_protocol is not None:
warnings.warn(
"transport_protocol is deprecated, use transport=ClientTransportConfig(preferred=...) instead",
FutureWarning,
stacklevel=5,
)
object.__setattr__(transport, "preferred", transport_protocol)
if supported_transports is not None:
warnings.warn(
"supported_transports is deprecated, use transport=ClientTransportConfig(supported=...) instead",
FutureWarning,
stacklevel=5,
)
object.__setattr__(transport, "supported", supported_transports)
class ClientTransportConfig(BaseModel):
    """Transport configuration for A2A client.

    Groups all client transport-related settings including preferred transport,
    supported transports for negotiation, and protocol-specific configurations.

    Transport negotiation logic:
    1. If `preferred` is set and server supports it -> use client's preferred
    2. Otherwise, if server's preferred is in client's `supported` -> use server's preferred
    3. Otherwise, find first match from client's `supported` in server's interfaces

    Attributes:
        preferred: Client's preferred transport. If set, client preference takes priority.
        supported: Transports the client can use, in order of preference.
        jsonrpc: JSON-RPC client transport configuration.
        grpc: gRPC client transport configuration.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    preferred: TransportType | None = Field(
        default=None,
        description="Client's preferred transport. If set, takes priority over server preference.",
    )
    # cast() keeps the default list typed as list[TransportType] for mypy.
    supported: list[TransportType] = Field(
        default_factory=lambda: cast(list[TransportType], ["JSONRPC"]),
        description="Transports the client can use, in order of preference",
    )
    jsonrpc: JSONRPCClientConfig = Field(
        default_factory=JSONRPCClientConfig,
        description="JSON-RPC client transport configuration",
    )
    grpc: GRPCClientConfig = Field(
        default_factory=GRPCClientConfig,
        description="gRPC client transport configuration",
    )
@deprecated(
    """
    `crewai.a2a.config.A2AConfig` is deprecated and will be removed in v2.0.0,
    use `crewai.a2a.config.A2AClientConfig` or `crewai.a2a.config.A2AServerConfig` instead.
    """,
    category=FutureWarning,
)
class A2AConfig(BaseModel):
    """Configuration for A2A protocol integration.

    Deprecated:
        Use A2AClientConfig instead. This class will be removed in a future version.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth: Authentication scheme.
        timeout: Request timeout in seconds.
        max_turns: Maximum conversation turns with A2A agent.
        response_model: Optional Pydantic model for structured A2A agent responses.
        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
        updates: Update mechanism config.
        client_extensions: Client-side processing hooks for tool injection and prompt augmentation.
        transport: Transport configuration (preferred, supported transports, gRPC settings).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    endpoint: Url = Field(description="A2A agent endpoint URL")
    auth: ClientAuthScheme | None = Field(
        default=None,
        description="Authentication scheme",
    )
    timeout: int = Field(default=120, description="Request timeout in seconds")
    max_turns: int = Field(
        default=10, description="Maximum conversation turns with A2A agent"
    )
    response_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for structured A2A agent responses",
    )
    fail_fast: bool = Field(
        default=True,
        description="If True, raise error when agent unreachable; if False, skip",
    )
    trust_remote_completion_status: bool = Field(
        default=False,
        description="If True, return A2A result directly when completed",
    )
    updates: UpdateConfig = Field(
        default_factory=_get_default_update_config,
        description="Update mechanism config",
    )
    client_extensions: list[ValidatedA2AExtension] = Field(
        default_factory=list,
        description="Client-side processing hooks for tool injection and prompt augmentation",
    )
    transport: ClientTransportConfig = Field(
        default_factory=ClientTransportConfig,
        description="Transport configuration (preferred, supported transports, gRPC settings)",
    )
    # Deprecated fields below are excluded from serialization and migrated
    # onto `transport` by the validator at the bottom of the class.
    transport_protocol: TransportType | None = Field(
        default=None,
        description="Deprecated: Use transport.preferred instead",
        exclude=True,
    )
    supported_transports: list[TransportType] | None = Field(
        default=None,
        description="Deprecated: Use transport.supported instead",
        exclude=True,
    )
    use_client_preference: bool | None = Field(
        default=None,
        description="Deprecated: Set transport.preferred to enable client preference",
        exclude=True,
    )
    # Internal flag; not part of the public schema.
    _parallel_delegation: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def _migrate_deprecated_transport_fields(self) -> Self:
        """Migrate deprecated transport fields to new config."""
        _migrate_client_transport_fields(
            self.transport, self.transport_protocol, self.supported_transports
        )
        if self.use_client_preference is not None:
            warnings.warn(
                "use_client_preference is deprecated, set transport.preferred to enable client preference",
                FutureWarning,
                stacklevel=4,
            )
            # object.__setattr__ bypasses pydantic validation when rewriting
            # the already-validated nested transport model.
            if self.use_client_preference and self.transport.supported:
                object.__setattr__(
                    self.transport, "preferred", self.transport.supported[0]
                )
        return self
class A2AClientConfig(BaseModel):
    """Configuration for connecting to remote A2A agents.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth: Authentication scheme.
        timeout: Request timeout in seconds.
        max_turns: Maximum conversation turns with A2A agent.
        response_model: Optional Pydantic model for structured A2A agent responses.
        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
        updates: Update mechanism config.
        accepted_output_modes: Media types the client can accept in responses.
        extensions: Extension URIs the client supports (A2A protocol extensions).
        client_extensions: Client-side processing hooks for tool injection and prompt augmentation.
        transport: Transport configuration (preferred, supported transports, gRPC settings).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    endpoint: Url = Field(description="A2A agent endpoint URL")
    auth: ClientAuthScheme | None = Field(
        default=None,
        description="Authentication scheme",
    )
    timeout: int = Field(default=120, description="Request timeout in seconds")
    max_turns: int = Field(
        default=10, description="Maximum conversation turns with A2A agent"
    )
    response_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for structured A2A agent responses",
    )
    fail_fast: bool = Field(
        default=True,
        description="If True, raise error when agent unreachable; if False, skip",
    )
    trust_remote_completion_status: bool = Field(
        default=False,
        description="If True, return A2A result directly when completed",
    )
    updates: UpdateConfig = Field(
        default_factory=_get_default_update_config,
        description="Update mechanism config",
    )
    accepted_output_modes: list[str] = Field(
        default_factory=lambda: ["application/json"],
        description="Media types the client can accept in responses",
    )
    extensions: list[str] = Field(
        default_factory=list,
        description="Extension URIs the client supports",
    )
    client_extensions: list[ValidatedA2AExtension] = Field(
        default_factory=list,
        description="Client-side processing hooks for tool injection and prompt augmentation",
    )
    transport: ClientTransportConfig = Field(
        default_factory=ClientTransportConfig,
        description="Transport configuration (preferred, supported transports, gRPC settings)",
    )
    # Deprecated fields are excluded from serialization and migrated onto
    # `transport` by the validator below.
    transport_protocol: TransportType | None = Field(
        default=None,
        description="Deprecated: Use transport.preferred instead",
        exclude=True,
    )
    supported_transports: list[TransportType] | None = Field(
        default=None,
        description="Deprecated: Use transport.supported instead",
        exclude=True,
    )
    # Internal flag; not part of the public schema.
    _parallel_delegation: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def _migrate_deprecated_transport_fields(self) -> Self:
        """Migrate deprecated transport fields to new config."""
        _migrate_client_transport_fields(
            self.transport, self.transport_protocol, self.supported_transports
        )
        return self
class A2AServerConfig(BaseModel):
    """Configuration for exposing a Crew or Agent as an A2A server.

    All fields correspond to A2A AgentCard fields. Fields like name, description,
    and skills can be auto-derived from the Crew/Agent if not provided.

    Attributes:
        name: Human-readable name for the agent.
        description: Human-readable description of the agent.
        version: Version string for the agent card.
        skills: List of agent skills/capabilities.
        default_input_modes: Default supported input MIME types.
        default_output_modes: Default supported output MIME types.
        capabilities: Declaration of optional capabilities.
        protocol_version: A2A protocol version this agent supports.
        provider: Information about the agent's service provider.
        documentation_url: URL to the agent's documentation.
        icon_url: URL to an icon for the agent.
        additional_interfaces: Additional supported interfaces.
        security: Security requirement objects for all interactions.
        security_schemes: Security schemes available to authorize requests.
        supports_authenticated_extended_card: Whether agent provides extended card to authenticated users.
        extended_skills: Additional skills visible only to authenticated users in the extended card.
        url: Preferred endpoint URL for the agent.
        signing_config: Configuration for signing the AgentCard with JWS.
        signatures: Deprecated. Pre-computed JWS signatures. Use signing_config instead.
        server_extensions: Server-side A2A protocol extensions with on_request/on_response hooks.
        push_notifications: Configuration for outgoing push notifications.
        transport: Transport configuration (preferred transport, gRPC, REST settings).
        auth: Authentication scheme for A2A endpoints.
    """

    # Reject unknown keys so config typos fail loudly instead of being ignored.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    name: str | None = Field(
        default=None,
        description="Human-readable name for the agent. Auto-derived from Crew/Agent if not provided.",
    )
    description: str | None = Field(
        default=None,
        description="Human-readable description of the agent. Auto-derived from Crew/Agent if not provided.",
    )
    version: str = Field(
        default="1.0.0",
        description="Version string for the agent card",
    )
    skills: list[AgentSkill] = Field(
        default_factory=list,
        description="List of agent skills. Auto-derived from tasks/tools if not provided.",
    )
    default_input_modes: list[str] = Field(
        default_factory=lambda: ["text/plain", "application/json"],
        description="Default supported input MIME types",
    )
    default_output_modes: list[str] = Field(
        default_factory=lambda: ["text/plain", "application/json"],
        description="Default supported output MIME types",
    )
    # Defaults advertise streaming support but no push notifications.
    capabilities: AgentCapabilities = Field(
        default_factory=lambda: AgentCapabilities(
            streaming=True,
            push_notifications=False,
        ),
        description="Declaration of optional capabilities supported by the agent",
    )
    protocol_version: ProtocolVersion = Field(
        default="0.3.0",
        description="A2A protocol version this agent supports",
    )
    provider: AgentProvider | None = Field(
        default=None,
        description="Information about the agent's service provider",
    )
    documentation_url: Url | None = Field(
        default=None,
        description="URL to the agent's documentation",
    )
    icon_url: Url | None = Field(
        default=None,
        description="URL to an icon for the agent",
    )
    additional_interfaces: list[AgentInterface] = Field(
        default_factory=list,
        description="Additional supported interfaces.",
    )
    security: list[dict[str, list[str]]] = Field(
        default_factory=list,
        description="Security requirement objects for all agent interactions",
    )
    security_schemes: dict[str, SecurityScheme] = Field(
        default_factory=dict,
        description="Security schemes available to authorize requests",
    )
    supports_authenticated_extended_card: bool = Field(
        default=False,
        description="Whether agent provides extended card to authenticated users",
    )
    extended_skills: list[AgentSkill] = Field(
        default_factory=list,
        description="Additional skills visible only to authenticated users in the extended card",
    )
    url: Url | None = Field(
        default=None,
        description="Preferred endpoint URL for the agent. Set at runtime if not provided.",
    )
    signing_config: AgentCardSigningConfig | None = Field(
        default=None,
        description="Configuration for signing the AgentCard with JWS",
    )
    # Deprecated: excluded from serialization; see _migrate_deprecated_fields.
    signatures: list[AgentCardSignature] | None = Field(
        default=None,
        description="Deprecated: Use signing_config instead. Pre-computed JWS signatures for the AgentCard.",
        exclude=True,
        deprecated=True,
    )
    server_extensions: list[ServerExtension] = Field(
        default_factory=list,
        description="Server-side A2A protocol extensions that modify agent behavior",
    )
    push_notifications: ServerPushNotificationConfig | None = Field(
        default=None,
        description="Configuration for outgoing push notifications",
    )
    transport: ServerTransportConfig = Field(
        default_factory=ServerTransportConfig,
        description="Transport configuration (preferred transport, gRPC, REST settings)",
    )
    # Deprecated: excluded from serialization; migrated into `transport` below.
    preferred_transport: TransportType | None = Field(
        default=None,
        description="Deprecated: Use transport.preferred instead",
        exclude=True,
        deprecated=True,
    )
    auth: ServerAuthScheme | None = Field(
        default=None,
        description="Authentication scheme for A2A endpoints. Defaults to SimpleTokenAuth using AUTH_TOKEN env var.",
    )

    @model_validator(mode="after")
    def _migrate_deprecated_fields(self) -> Self:
        """Migrate deprecated fields to new config."""
        if self.preferred_transport is not None:
            warnings.warn(
                "preferred_transport is deprecated, use transport=ServerTransportConfig(preferred=...) instead",
                FutureWarning,
                stacklevel=4,
            )
            # object.__setattr__ — presumably to bypass pydantic assignment
            # validation on the nested transport model; TODO confirm.
            object.__setattr__(self.transport, "preferred", self.preferred_transport)
        if self.signatures is not None:
            # NOTE(review): this branch only warns; the signatures value is not
            # copied anywhere — confirm downstream code still reads self.signatures.
            warnings.warn(
                "signatures is deprecated, use signing_config=AgentCardSigningConfig(...) instead. "
                "The signatures field will be removed in v2.0.0.",
                FutureWarning,
                stacklevel=4,
            )
        return self

View File

@@ -1,503 +0,0 @@
"""A2A error codes and error response utilities.
This module provides a centralized mapping of all A2A protocol error codes
as defined in the A2A specification, plus custom CrewAI extensions.
Error codes follow JSON-RPC 2.0 conventions:
- -32700 to -32600: Standard JSON-RPC errors
- -32099 to -32000: Server errors (A2A-specific)
- -32768 to -32100: Reserved for implementation-defined errors
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import IntEnum
from typing import Any
from a2a.client.errors import A2AClientTimeoutError
class A2APollingTimeoutError(A2AClientTimeoutError):
    """Raised when polling exceeds the configured timeout.

    Subclasses the SDK's A2AClientTimeoutError so existing handlers that
    catch the SDK timeout also catch polling timeouts.
    """
class A2AErrorCode(IntEnum):
    """A2A protocol error codes.

    Codes follow JSON-RPC 2.0 specification with A2A-specific extensions.
    """

    # JSON-RPC 2.0 Standard Errors (-32700 to -32600)
    JSON_PARSE_ERROR = -32700
    """Invalid JSON was received by the server."""
    INVALID_REQUEST = -32600
    """The JSON sent is not a valid Request object."""
    METHOD_NOT_FOUND = -32601
    """The method does not exist / is not available."""
    INVALID_PARAMS = -32602
    """Invalid method parameter(s)."""
    INTERNAL_ERROR = -32603
    """Internal JSON-RPC error."""

    # A2A-Specific Errors (-32099 to -32000)
    TASK_NOT_FOUND = -32001
    """The specified task was not found."""
    TASK_NOT_CANCELABLE = -32002
    """The task cannot be canceled (already completed/failed)."""
    PUSH_NOTIFICATION_NOT_SUPPORTED = -32003
    """Push notifications are not supported by this agent."""
    UNSUPPORTED_OPERATION = -32004
    """The requested operation is not supported."""
    CONTENT_TYPE_NOT_SUPPORTED = -32005
    """Incompatible content types between client and server."""
    INVALID_AGENT_RESPONSE = -32006
    """The agent produced an invalid response."""
    AUTHENTICATED_EXTENDED_CARD_NOT_CONFIGURED = -32007
    """Authenticated extended card feature is not configured."""

    # CrewAI Custom Extensions (-32768 to -32100)
    # NOTE(review): the values below (-32009..-32018) actually fall inside the
    # -32099..-32000 server-error range, not the -32768..-32100 range the header
    # claims — confirm intended numbering before relying on the comment.
    UNSUPPORTED_VERSION = -32009
    """The requested A2A protocol version is not supported."""
    UNSUPPORTED_EXTENSION = -32010
    """Client does not support required protocol extensions."""
    AUTHENTICATION_REQUIRED = -32011
    """Authentication is required for this operation."""
    AUTHORIZATION_FAILED = -32012
    """Authorization check failed (insufficient permissions)."""
    RATE_LIMIT_EXCEEDED = -32013
    """Rate limit exceeded for this client/operation."""
    TASK_TIMEOUT = -32014
    """Task execution timed out."""
    TRANSPORT_NEGOTIATION_FAILED = -32015
    """Failed to negotiate a compatible transport protocol."""
    CONTEXT_NOT_FOUND = -32016
    """The specified context was not found."""
    SKILL_NOT_FOUND = -32017
    """The specified skill was not found."""
    ARTIFACT_NOT_FOUND = -32018
    """The specified artifact was not found."""
# Default human-readable message for each error code; used by A2AError.__post_init__
# when a message is not supplied explicitly.
ERROR_MESSAGES: dict[int, str] = {
    A2AErrorCode.JSON_PARSE_ERROR: "Parse error",
    A2AErrorCode.INVALID_REQUEST: "Invalid Request",
    A2AErrorCode.METHOD_NOT_FOUND: "Method not found",
    A2AErrorCode.INVALID_PARAMS: "Invalid params",
    A2AErrorCode.INTERNAL_ERROR: "Internal error",
    A2AErrorCode.TASK_NOT_FOUND: "Task not found",
    A2AErrorCode.TASK_NOT_CANCELABLE: "Task not cancelable",
    A2AErrorCode.PUSH_NOTIFICATION_NOT_SUPPORTED: "Push Notification is not supported",
    A2AErrorCode.UNSUPPORTED_OPERATION: "This operation is not supported",
    A2AErrorCode.CONTENT_TYPE_NOT_SUPPORTED: "Incompatible content types",
    A2AErrorCode.INVALID_AGENT_RESPONSE: "Invalid agent response",
    A2AErrorCode.AUTHENTICATED_EXTENDED_CARD_NOT_CONFIGURED: "Authenticated Extended Card is not configured",
    A2AErrorCode.UNSUPPORTED_VERSION: "Unsupported A2A version",
    A2AErrorCode.UNSUPPORTED_EXTENSION: "Client does not support required extensions",
    A2AErrorCode.AUTHENTICATION_REQUIRED: "Authentication required",
    A2AErrorCode.AUTHORIZATION_FAILED: "Authorization failed",
    A2AErrorCode.RATE_LIMIT_EXCEEDED: "Rate limit exceeded",
    A2AErrorCode.TASK_TIMEOUT: "Task execution timed out",
    A2AErrorCode.TRANSPORT_NEGOTIATION_FAILED: "Transport negotiation failed",
    A2AErrorCode.CONTEXT_NOT_FOUND: "Context not found",
    A2AErrorCode.SKILL_NOT_FOUND: "Skill not found",
    A2AErrorCode.ARTIFACT_NOT_FOUND: "Artifact not found",
}
@dataclass
class A2AError(Exception):
"""Base exception for A2A protocol errors.
Attributes:
code: The A2A/JSON-RPC error code.
message: Human-readable error message.
data: Optional additional error data.
"""
code: int
message: str | None = None
data: Any = None
def __post_init__(self) -> None:
if self.message is None:
self.message = ERROR_MESSAGES.get(self.code, "Unknown error")
super().__init__(self.message)
def to_dict(self) -> dict[str, Any]:
"""Convert to JSON-RPC error object format."""
error: dict[str, Any] = {
"code": self.code,
"message": self.message,
}
if self.data is not None:
error["data"] = self.data
return error
def to_response(self, request_id: str | int | None = None) -> dict[str, Any]:
"""Convert to full JSON-RPC error response."""
return {
"jsonrpc": "2.0",
"error": self.to_dict(),
"id": request_id,
}
@dataclass
class JSONParseError(A2AError):
    """Invalid JSON was received."""

    code: int = field(default=A2AErrorCode.JSON_PARSE_ERROR, init=False)


@dataclass
class InvalidRequestError(A2AError):
    """The JSON sent is not a valid Request object."""

    code: int = field(default=A2AErrorCode.INVALID_REQUEST, init=False)


@dataclass
class MethodNotFoundError(A2AError):
    """The method does not exist / is not available."""

    code: int = field(default=A2AErrorCode.METHOD_NOT_FOUND, init=False)
    method: str | None = None

    def __post_init__(self) -> None:
        # Build a method-specific message before the base fills in the generic one.
        if self.message is None and self.method:
            self.message = f"Method not found: {self.method}"
        super().__post_init__()


@dataclass
class InvalidParamsError(A2AError):
    """Invalid method parameter(s)."""

    code: int = field(default=A2AErrorCode.INVALID_PARAMS, init=False)
    param: str | None = None
    reason: str | None = None

    def __post_init__(self) -> None:
        # Message specificity depends on which of param/reason were supplied.
        if self.message is None:
            if self.param and self.reason:
                self.message = f"Invalid parameter '{self.param}': {self.reason}"
            elif self.param:
                self.message = f"Invalid parameter: {self.param}"
        super().__post_init__()


@dataclass
class InternalError(A2AError):
    """Internal JSON-RPC error."""

    code: int = field(default=A2AErrorCode.INTERNAL_ERROR, init=False)
@dataclass
class TaskNotFoundError(A2AError):
    """The specified task was not found."""

    code: int = field(default=A2AErrorCode.TASK_NOT_FOUND, init=False)
    task_id: str | None = None

    def __post_init__(self) -> None:
        if self.message is None and self.task_id:
            self.message = f"Task not found: {self.task_id}"
        super().__post_init__()


@dataclass
class TaskNotCancelableError(A2AError):
    """The task cannot be canceled."""

    code: int = field(default=A2AErrorCode.TASK_NOT_CANCELABLE, init=False)
    task_id: str | None = None
    reason: str | None = None

    def __post_init__(self) -> None:
        if self.message is None:
            if self.task_id and self.reason:
                self.message = f"Task {self.task_id} cannot be canceled: {self.reason}"
            elif self.task_id:
                self.message = f"Task {self.task_id} cannot be canceled"
        super().__post_init__()


@dataclass
class PushNotificationNotSupportedError(A2AError):
    """Push notifications are not supported."""

    code: int = field(default=A2AErrorCode.PUSH_NOTIFICATION_NOT_SUPPORTED, init=False)


@dataclass
class UnsupportedOperationError(A2AError):
    """The requested operation is not supported."""

    code: int = field(default=A2AErrorCode.UNSUPPORTED_OPERATION, init=False)
    operation: str | None = None

    def __post_init__(self) -> None:
        if self.message is None and self.operation:
            self.message = f"Operation not supported: {self.operation}"
        super().__post_init__()


@dataclass
class ContentTypeNotSupportedError(A2AError):
    """Incompatible content types."""

    code: int = field(default=A2AErrorCode.CONTENT_TYPE_NOT_SUPPORTED, init=False)
    requested_types: list[str] | None = None
    supported_types: list[str] | None = None

    def __post_init__(self) -> None:
        # Both lists must be present to build the detailed message.
        if self.message is None and self.requested_types and self.supported_types:
            self.message = (
                f"Content type not supported. Requested: {self.requested_types}, "
                f"Supported: {self.supported_types}"
            )
        super().__post_init__()


@dataclass
class InvalidAgentResponseError(A2AError):
    """The agent produced an invalid response."""

    code: int = field(default=A2AErrorCode.INVALID_AGENT_RESPONSE, init=False)


@dataclass
class AuthenticatedExtendedCardNotConfiguredError(A2AError):
    """Authenticated extended card is not configured."""

    code: int = field(
        default=A2AErrorCode.AUTHENTICATED_EXTENDED_CARD_NOT_CONFIGURED, init=False
    )
class UnsupportedVersionError(A2AError):
"""The requested A2A version is not supported."""
code: int = field(default=A2AErrorCode.UNSUPPORTED_VERSION, init=False)
requested_version: str | None = None
supported_versions: list[str] | None = None
def __post_init__(self) -> None:
if self.message is None and self.requested_version:
msg = f"Unsupported A2A version: {self.requested_version}"
if self.supported_versions:
msg += f". Supported versions: {', '.join(self.supported_versions)}"
self.message = msg
super().__post_init__()
@dataclass
class UnsupportedExtensionError(A2AError):
"""Client does not support required extensions."""
code: int = field(default=A2AErrorCode.UNSUPPORTED_EXTENSION, init=False)
required_extensions: list[str] | None = None
def __post_init__(self) -> None:
if self.message is None and self.required_extensions:
self.message = f"Client does not support required extensions: {', '.join(self.required_extensions)}"
super().__post_init__()
@dataclass
class AuthenticationRequiredError(A2AError):
"""Authentication is required."""
code: int = field(default=A2AErrorCode.AUTHENTICATION_REQUIRED, init=False)
@dataclass
class AuthorizationFailedError(A2AError):
"""Authorization check failed."""
code: int = field(default=A2AErrorCode.AUTHORIZATION_FAILED, init=False)
required_scope: str | None = None
def __post_init__(self) -> None:
if self.message is None and self.required_scope:
self.message = (
f"Authorization failed. Required scope: {self.required_scope}"
)
super().__post_init__()
@dataclass
class RateLimitExceededError(A2AError):
    """Rate limit exceeded.

    Attributes:
        retry_after: Seconds the client should wait before retrying, if known.
            Surfaced both in the message and under ``data["retry_after"]``.
    """

    code: int = field(default=A2AErrorCode.RATE_LIMIT_EXCEEDED, init=False)
    retry_after: int | None = None

    def __post_init__(self) -> None:
        # `is not None` (not truthiness): 0 is a valid hint meaning "retry now".
        if self.message is None and self.retry_after is not None:
            self.message = (
                f"Rate limit exceeded. Retry after {self.retry_after} seconds"
            )
        if self.retry_after is not None:
            # Merge into caller-supplied dict data instead of clobbering it.
            if isinstance(self.data, dict):
                self.data = {**self.data, "retry_after": self.retry_after}
            else:
                self.data = {"retry_after": self.retry_after}
        super().__post_init__()
@dataclass
class TaskTimeoutError(A2AError):
    """Task execution timed out."""

    code: int = field(default=A2AErrorCode.TASK_TIMEOUT, init=False)
    task_id: str | None = None
    timeout_seconds: float | None = None

    def __post_init__(self) -> None:
        # NOTE(review): truthiness test means timeout_seconds=0 or 0.0 falls
        # through to the task-id-only message — confirm that is intentional.
        if self.message is None:
            if self.task_id and self.timeout_seconds:
                self.message = (
                    f"Task {self.task_id} timed out after {self.timeout_seconds}s"
                )
            elif self.task_id:
                self.message = f"Task {self.task_id} timed out"
        super().__post_init__()
@dataclass
class TransportNegotiationFailedError(A2AError):
    """Failed to negotiate a compatible transport protocol."""

    code: int = field(default=A2AErrorCode.TRANSPORT_NEGOTIATION_FAILED, init=False)
    client_transports: list[str] | None = None
    server_transports: list[str] | None = None

    def __post_init__(self) -> None:
        # Detailed message requires both sides' transport lists.
        if self.message is None and self.client_transports and self.server_transports:
            self.message = (
                f"Transport negotiation failed. Client: {self.client_transports}, "
                f"Server: {self.server_transports}"
            )
        super().__post_init__()


@dataclass
class ContextNotFoundError(A2AError):
    """The specified context was not found."""

    code: int = field(default=A2AErrorCode.CONTEXT_NOT_FOUND, init=False)
    context_id: str | None = None

    def __post_init__(self) -> None:
        if self.message is None and self.context_id:
            self.message = f"Context not found: {self.context_id}"
        super().__post_init__()


@dataclass
class SkillNotFoundError(A2AError):
    """The specified skill was not found."""

    code: int = field(default=A2AErrorCode.SKILL_NOT_FOUND, init=False)
    skill_id: str | None = None

    def __post_init__(self) -> None:
        if self.message is None and self.skill_id:
            self.message = f"Skill not found: {self.skill_id}"
        super().__post_init__()


@dataclass
class ArtifactNotFoundError(A2AError):
    """The specified artifact was not found."""

    code: int = field(default=A2AErrorCode.ARTIFACT_NOT_FOUND, init=False)
    artifact_id: str | None = None

    def __post_init__(self) -> None:
        if self.message is None and self.artifact_id:
            self.message = f"Artifact not found: {self.artifact_id}"
        super().__post_init__()
def create_error_response(
    code: int | A2AErrorCode,
    message: str | None = None,
    data: Any = None,
    request_id: str | int | None = None,
) -> dict[str, Any]:
    """Create a JSON-RPC error response.

    Args:
        code: Error code (A2AErrorCode or int).
        message: Optional error message (uses default if not provided).
        data: Optional additional error data.
        request_id: Request ID for correlation.

    Returns:
        Dict in JSON-RPC error response format.
    """
    return A2AError(code=int(code), message=message, data=data).to_response(request_id)
def is_retryable_error(code: int) -> bool:
    """Check if an error is potentially retryable.

    Args:
        code: Error code to check.

    Returns:
        True if the error might be resolved by retrying.
    """
    return code in (
        A2AErrorCode.INTERNAL_ERROR,
        A2AErrorCode.RATE_LIMIT_EXCEEDED,
        A2AErrorCode.TASK_TIMEOUT,
    )
def is_client_error(code: int) -> bool:
    """Check if an error is a client-side error.

    Args:
        code: Error code to check.

    Returns:
        True if the error is due to client request issues.
    """
    return code in frozenset(
        (
            A2AErrorCode.JSON_PARSE_ERROR,
            A2AErrorCode.INVALID_REQUEST,
            A2AErrorCode.METHOD_NOT_FOUND,
            A2AErrorCode.INVALID_PARAMS,
            A2AErrorCode.TASK_NOT_FOUND,
            A2AErrorCode.CONTENT_TYPE_NOT_SUPPORTED,
            A2AErrorCode.UNSUPPORTED_VERSION,
            A2AErrorCode.UNSUPPORTED_EXTENSION,
            A2AErrorCode.CONTEXT_NOT_FOUND,
            A2AErrorCode.SKILL_NOT_FOUND,
            A2AErrorCode.ARTIFACT_NOT_FOUND,
        )
    )

View File

@@ -1,37 +0,0 @@
"""A2A Protocol Extensions for CrewAI.
This module contains extensions to the A2A (Agent-to-Agent) protocol.
**Client-side extensions** (A2AExtension) allow customizing how the A2A wrapper
processes requests and responses during delegation to remote agents. These provide
hooks for tool injection, prompt augmentation, and response processing.
**Server-side extensions** (ServerExtension) allow agents to offer additional
functionality beyond the core A2A specification. Clients activate extensions
via the X-A2A-Extensions header.
See: https://a2a-protocol.org/latest/topics/extensions/
"""
from crewai.a2a.extensions.base import (
A2AExtension,
ConversationState,
ExtensionRegistry,
ValidatedA2AExtension,
)
from crewai.a2a.extensions.server import (
ExtensionContext,
ServerExtension,
ServerExtensionRegistry,
)
# Public API of crewai.a2a.extensions (client- and server-side extension hooks).
__all__ = [
    "A2AExtension",
    "ConversationState",
    "ExtensionContext",
    "ExtensionRegistry",
    "ServerExtension",
    "ServerExtensionRegistry",
    "ValidatedA2AExtension",
]

View File

@@ -1,148 +0,0 @@
"""A2UI (Agent to UI) declarative UI protocol support for CrewAI."""
from crewai.a2a.extensions.a2ui.catalog import (
AudioPlayer,
Button,
Card,
CheckBox,
Column,
DateTimeInput,
Divider,
Icon,
Image,
List,
Modal,
MultipleChoice,
Row,
Slider,
Tabs,
Text,
TextField,
Video,
)
from crewai.a2a.extensions.a2ui.client_extension import A2UIClientExtension
from crewai.a2a.extensions.a2ui.models import (
A2UIEvent,
A2UIMessage,
A2UIResponse,
BeginRendering,
DataModelUpdate,
DeleteSurface,
SurfaceUpdate,
UserAction,
)
from crewai.a2a.extensions.a2ui.server_extension import (
A2UI_STANDARD_CATALOG_ID,
A2UI_V09_BASIC_CATALOG_ID,
A2UI_V09_EXTENSION_URI,
A2UIServerExtension,
)
from crewai.a2a.extensions.a2ui.v0_9 import (
A2UIEventV09,
A2UIMessageV09,
ActionEvent,
ActionV09,
AudioPlayerV09,
ButtonV09,
CardV09,
CheckBoxV09,
ChoicePickerV09,
ClientDataModel,
ClientErrorV09,
ColumnV09,
CreateSurface,
DateTimeInputV09,
DeleteSurfaceV09,
DividerV09,
IconV09,
ImageV09,
ListV09,
ModalV09,
RowV09,
SliderV09,
TabsV09,
TextFieldV09,
TextV09,
Theme,
UpdateComponents,
UpdateDataModel,
VideoV09,
)
from crewai.a2a.extensions.a2ui.validator import (
validate_a2ui_event,
validate_a2ui_event_v09,
validate_a2ui_message,
validate_a2ui_message_v09,
validate_catalog_components,
validate_catalog_components_v09,
)
# Public API of the A2UI package: standard-catalog components, v0.9 variants
# (suffixed V09), message/event models, extensions, and validators.
__all__ = [
    "A2UI_STANDARD_CATALOG_ID",
    "A2UI_V09_BASIC_CATALOG_ID",
    "A2UI_V09_EXTENSION_URI",
    "A2UIClientExtension",
    "A2UIEvent",
    "A2UIEventV09",
    "A2UIMessage",
    "A2UIMessageV09",
    "A2UIResponse",
    "A2UIServerExtension",
    "ActionEvent",
    "ActionV09",
    "AudioPlayer",
    "AudioPlayerV09",
    "BeginRendering",
    "Button",
    "ButtonV09",
    "Card",
    "CardV09",
    "CheckBox",
    "CheckBoxV09",
    "ChoicePickerV09",
    "ClientDataModel",
    "ClientErrorV09",
    "Column",
    "ColumnV09",
    "CreateSurface",
    "DataModelUpdate",
    "DateTimeInput",
    "DateTimeInputV09",
    "DeleteSurface",
    "DeleteSurfaceV09",
    "Divider",
    "DividerV09",
    "Icon",
    "IconV09",
    "Image",
    "ImageV09",
    "List",
    "ListV09",
    "Modal",
    "ModalV09",
    "MultipleChoice",
    "Row",
    "RowV09",
    "Slider",
    "SliderV09",
    "SurfaceUpdate",
    "Tabs",
    "TabsV09",
    "Text",
    "TextField",
    "TextFieldV09",
    "TextV09",
    "Theme",
    "UpdateComponents",
    "UpdateDataModel",
    "UserAction",
    "Video",
    "VideoV09",
    "validate_a2ui_event",
    "validate_a2ui_event_v09",
    "validate_a2ui_message",
    "validate_a2ui_message_v09",
    "validate_catalog_components",
    "validate_catalog_components_v09",
]

View File

@@ -1,467 +0,0 @@
"""Typed helpers for A2UI standard catalog components.
These models provide optional type safety for standard catalog components.
Agents can also use raw dicts validated against the JSON schema.
"""
from __future__ import annotations
from typing import Literal
from pydantic import BaseModel, ConfigDict, Field
# Binding models: each carries either a literal value or a data-model `path`.
# NOTE(review): nothing here enforces exactly-one-of literal/path — presumably
# validated elsewhere (see the validator module); confirm.
class StringBinding(BaseModel):
    """A string value: literal or data-model path."""

    literal_string: str | None = Field(
        default=None, alias="literalString", description="Literal string value."
    )
    path: str | None = Field(default=None, description="Data-model path reference.")

    # populate_by_name lets callers use snake_case names alongside the camelCase aliases.
    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class NumberBinding(BaseModel):
    """A numeric value: literal or data-model path."""

    literal_number: float | None = Field(
        default=None, alias="literalNumber", description="Literal numeric value."
    )
    path: str | None = Field(default=None, description="Data-model path reference.")

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class BooleanBinding(BaseModel):
    """A boolean value: literal or data-model path."""

    literal_boolean: bool | None = Field(
        default=None, alias="literalBoolean", description="Literal boolean value."
    )
    path: str | None = Field(default=None, description="Data-model path reference.")

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class ArrayBinding(BaseModel):
    """An array value: literal or data-model path."""

    literal_array: list[str] | None = Field(
        default=None, alias="literalArray", description="Literal array of strings."
    )
    path: str | None = Field(default=None, description="Data-model path reference.")

    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class ChildrenDef(BaseModel):
    """Children definition for layout components."""

    explicit_list: list[str] | None = Field(
        default=None,
        alias="explicitList",
        description="Explicit list of child component IDs.",
    )
    # Forward reference to ChildTemplate (defined below) — resolved lazily
    # because the module uses `from __future__ import annotations`.
    template: ChildTemplate | None = Field(
        default=None, description="Template for generating dynamic children."
    )

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class ChildTemplate(BaseModel):
    """Template for generating dynamic children from a data model list."""

    component_id: str = Field(
        alias="componentId", description="ID of the component to repeat."
    )
    data_binding: str = Field(
        alias="dataBinding", description="Data-model path to bind the template to."
    )

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class ActionContextEntry(BaseModel):
    """A key-value pair in an action context payload."""

    key: str = Field(description="Context entry key.")
    value: ActionBoundValue = Field(description="Context entry value.")

    model_config = ConfigDict(extra="forbid")


class ActionBoundValue(BaseModel):
    """A value in an action context: literal or data-model path."""

    path: str | None = Field(default=None, description="Data-model path reference.")
    literal_string: str | None = Field(
        default=None, alias="literalString", description="Literal string value."
    )
    literal_number: float | None = Field(
        default=None, alias="literalNumber", description="Literal numeric value."
    )
    literal_boolean: bool | None = Field(
        default=None, alias="literalBoolean", description="Literal boolean value."
    )

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class Action(BaseModel):
    """Client-side action dispatched by interactive components."""

    name: str = Field(description="Action name dispatched on interaction.")
    context: list[ActionContextEntry] | None = Field(
        default=None, description="Key-value pairs sent with the action."
    )

    model_config = ConfigDict(extra="forbid")


class TabItem(BaseModel):
    """A single tab definition."""

    title: StringBinding = Field(description="Tab title text.")
    child: str = Field(description="Component ID rendered as the tab content.")

    model_config = ConfigDict(extra="forbid")


class MultipleChoiceOption(BaseModel):
    """A single option in a MultipleChoice component."""

    label: StringBinding = Field(description="Display label for the option.")
    value: str = Field(description="Value submitted when the option is selected.")

    model_config = ConfigDict(extra="forbid")
class Text(BaseModel):
    """Displays text content."""

    text: StringBinding = Field(description="Text content to display.")
    usage_hint: Literal["h1", "h2", "h3", "h4", "h5", "caption", "body"] | None = Field(
        default=None, alias="usageHint", description="Semantic hint for text styling."
    )

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class Image(BaseModel):
    """Displays an image."""

    url: StringBinding = Field(description="Image source URL.")
    fit: Literal["contain", "cover", "fill", "none", "scale-down"] | None = Field(
        default=None, description="Object-fit behavior for the image."
    )
    usage_hint: (
        Literal[
            "icon", "avatar", "smallFeature", "mediumFeature", "largeFeature", "header"
        ]
        | None
    ) = Field(
        default=None, alias="usageHint", description="Semantic hint for image sizing."
    )

    model_config = ConfigDict(populate_by_name=True, extra="forbid")
# Closed set of icon identifiers accepted by the Icon component (camelCase
# names, Material-style). Used as the literal branch of IconBinding.
IconName = Literal[
    "accountCircle",
    "add",
    "arrowBack",
    "arrowForward",
    "attachFile",
    "calendarToday",
    "call",
    "camera",
    "check",
    "close",
    "delete",
    "download",
    "edit",
    "event",
    "error",
    "favorite",
    "favoriteOff",
    "folder",
    "help",
    "home",
    "info",
    "locationOn",
    "lock",
    "lockOpen",
    "mail",
    "menu",
    "moreVert",
    "moreHoriz",
    "notificationsOff",
    "notifications",
    "payment",
    "person",
    "phone",
    "photo",
    "print",
    "refresh",
    "search",
    "send",
    "settings",
    "share",
    "shoppingCart",
    "star",
    "starHalf",
    "starOff",
    "upload",
    "visibility",
    "visibilityOff",
    "warning",
]
class IconBinding(BaseModel):
    """Icon name: literal enum or data-model path."""

    # Unlike StringBinding, the literal branch is constrained to IconName values.
    literal_string: IconName | None = Field(
        default=None, alias="literalString", description="Literal icon name."
    )
    path: str | None = Field(default=None, description="Data-model path reference.")

    model_config = ConfigDict(populate_by_name=True, extra="forbid")


class Icon(BaseModel):
    """Displays a named icon."""

    name: IconBinding = Field(description="Icon name binding.")

    model_config = ConfigDict(extra="forbid")


class Video(BaseModel):
    """Displays a video player."""

    url: StringBinding = Field(description="Video source URL.")

    model_config = ConfigDict(extra="forbid")


class AudioPlayer(BaseModel):
    """Displays an audio player."""

    url: StringBinding = Field(description="Audio source URL.")
    description: StringBinding | None = Field(
        default=None, description="Accessible description of the audio content."
    )

    model_config = ConfigDict(extra="forbid")
class Row(BaseModel):
    """Horizontal layout container."""

    children: ChildrenDef = Field(description="Child components in this row.")
    distribution: (
        Literal["center", "end", "spaceAround", "spaceBetween", "spaceEvenly", "start"]
        | None
    ) = Field(
        default=None, description="How children are distributed along the main axis."
    )
    alignment: Literal["start", "center", "end", "stretch"] | None = Field(
        default=None, description="How children are aligned on the cross axis."
    )

    model_config = ConfigDict(extra="forbid")


class Column(BaseModel):
    """Vertical layout container."""

    children: ChildrenDef = Field(description="Child components in this column.")
    distribution: (
        Literal["start", "center", "end", "spaceBetween", "spaceAround", "spaceEvenly"]
        | None
    ) = Field(
        default=None, description="How children are distributed along the main axis."
    )
    alignment: Literal["center", "end", "start", "stretch"] | None = Field(
        default=None, description="How children are aligned on the cross axis."
    )

    model_config = ConfigDict(extra="forbid")


# Named after the A2UI "List" component; shadows nothing imported here, but be
# careful not to use it where typing's List is intended.
class List(BaseModel):
    """Scrollable list container."""

    children: ChildrenDef = Field(description="Child components in this list.")
    direction: Literal["vertical", "horizontal"] | None = Field(
        default=None, description="Scroll direction of the list."
    )
    alignment: Literal["start", "center", "end", "stretch"] | None = Field(
        default=None, description="How children are aligned on the cross axis."
    )

    model_config = ConfigDict(extra="forbid")


class Card(BaseModel):
    """Card container wrapping a single child."""

    child: str = Field(description="Component ID of the card content.")

    model_config = ConfigDict(extra="forbid")
class Tabs(BaseModel):
"""Tabbed navigation container."""
tab_items: list[TabItem] = Field(
alias="tabItems", description="List of tab definitions."
)
model_config = ConfigDict(populate_by_name=True, extra="forbid")
class Divider(BaseModel):
"""A visual divider line."""
axis: Literal["horizontal", "vertical"] | None = Field(
default=None, description="Orientation of the divider."
)
model_config = ConfigDict(extra="forbid")
class Modal(BaseModel):
"""A modal dialog with an entry point trigger and content."""
entry_point_child: str = Field(
alias="entryPointChild", description="Component ID that triggers the modal."
)
content_child: str = Field(
alias="contentChild", description="Component ID rendered inside the modal."
)
model_config = ConfigDict(populate_by_name=True, extra="forbid")
class Button(BaseModel):
    """An interactive button with an action."""
    # ``child`` references the label component by ID; the label is defined separately.
    child: str = Field(description="Component ID of the button label.")
    primary: bool | None = Field(
        default=None, description="Whether the button uses primary styling."
    )
    action: Action = Field(description="Action dispatched when the button is clicked.")
    model_config = ConfigDict(extra="forbid")
class CheckBox(BaseModel):
    """A checkbox input."""
    label: StringBinding = Field(description="Label text for the checkbox.")
    # Two-way binding: the checked state lives in the surface data model.
    value: BooleanBinding = Field(
        description="Boolean value binding for the checkbox state."
    )
    model_config = ConfigDict(extra="forbid")
class TextField(BaseModel):
    """A text input field."""
    label: StringBinding = Field(description="Label text for the input.")
    text: StringBinding | None = Field(
        default=None, description="Current text value binding."
    )
    text_field_type: (
        Literal["date", "longText", "number", "shortText", "obscured"] | None
    ) = Field(default=None, alias="textFieldType", description="Input type variant.")
    # Validation is performed client-side; the regex is passed through verbatim.
    validation_regexp: str | None = Field(
        default=None,
        alias="validationRegexp",
        description="Regex pattern for client-side validation.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class DateTimeInput(BaseModel):
    """A date and/or time picker."""
    # The bound value is an ISO string; date/time halves can be toggled independently.
    value: StringBinding = Field(description="ISO date/time string value binding.")
    enable_date: bool | None = Field(
        default=None,
        alias="enableDate",
        description="Whether the date picker is enabled.",
    )
    enable_time: bool | None = Field(
        default=None,
        alias="enableTime",
        description="Whether the time picker is enabled.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class MultipleChoice(BaseModel):
    """A multiple-choice selection component."""
    # Selected values are stored as an array in the surface data model.
    selections: ArrayBinding = Field(description="Array binding for selected values.")
    options: list[MultipleChoiceOption] = Field(description="Available choices.")
    max_allowed_selections: int | None = Field(
        default=None,
        alias="maxAllowedSelections",
        description="Maximum number of selections allowed.",
    )
    variant: Literal["checkbox", "chips"] | None = Field(
        default=None, description="Visual variant for the selection UI."
    )
    filterable: bool | None = Field(
        default=None, description="Whether options can be filtered by typing."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class Slider(BaseModel):
    """A numeric slider input."""
    value: NumberBinding = Field(
        description="Numeric value binding for the slider position."
    )
    # No cross-field check that minValue <= maxValue; clients are expected to cope.
    min_value: float | None = Field(
        default=None, alias="minValue", description="Minimum slider value."
    )
    max_value: float | None = Field(
        default=None, alias="maxValue", description="Maximum slider value."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
# Names of every component type in the A2UI standard catalog. Used for prompt
# generation and as the default allowlist when filtering agent-produced UI.
STANDARD_CATALOG_COMPONENTS: frozenset[str] = frozenset(
    {
        "Text",
        "Image",
        "Icon",
        "Video",
        "AudioPlayer",
        "Row",
        "Column",
        "List",
        "Card",
        "Tabs",
        "Divider",
        "Modal",
        "Button",
        "CheckBox",
        "TextField",
        "DateTimeInput",
        "MultipleChoice",
        "Slider",
    }
)

View File

@@ -1,496 +0,0 @@
"""A2UI client extension for the A2A protocol."""
from __future__ import annotations
from collections.abc import Sequence
import logging
from typing import TYPE_CHECKING, Any, Literal, cast
from pydantic import Field
from pydantic.dataclasses import dataclass
from typing_extensions import TypeIs, TypedDict
from crewai.a2a.extensions.a2ui.models import extract_a2ui_json_objects
from crewai.a2a.extensions.a2ui.prompt import (
build_a2ui_system_prompt,
build_a2ui_v09_system_prompt,
)
from crewai.a2a.extensions.a2ui.server_extension import (
A2UI_MIME_TYPE,
A2UI_STANDARD_CATALOG_ID,
A2UI_V09_BASIC_CATALOG_ID,
)
from crewai.a2a.extensions.a2ui.v0_9 import extract_a2ui_v09_json_objects
from crewai.a2a.extensions.a2ui.validator import (
A2UIValidationError,
validate_a2ui_message,
validate_a2ui_message_v09,
)
if TYPE_CHECKING:
from a2a.types import Message
from crewai.agent.core import Agent
logger = logging.getLogger(__name__)
class StylesDict(TypedDict, total=False):
    """Serialized surface styling."""
    # total=False: both keys are optional on the wire.
    font: str
    primaryColor: str
class ComponentEntryDict(TypedDict, total=False):
    """Serialized component entry in a surface update."""
    id: str
    weight: float
    # Single-key mapping: component type name -> its properties.
    component: dict[str, Any]
class BeginRenderingDict(TypedDict, total=False):
    """Serialized beginRendering payload (v0.8 surface initialization)."""
    surfaceId: str
    root: str
    catalogId: str
    styles: StylesDict
class SurfaceUpdateDict(TypedDict, total=False):
    """Serialized surfaceUpdate payload."""
    surfaceId: str
    components: list[ComponentEntryDict]
class DataEntryDict(TypedDict, total=False):
    """Serialized data model entry (one typed value* key per entry)."""
    key: str
    valueString: str
    valueNumber: float
    valueBoolean: bool
    # Recursive: nested entries form an adjacency-list representation of a map.
    valueMap: list[DataEntryDict]
class DataModelUpdateDict(TypedDict, total=False):
    """Serialized dataModelUpdate payload."""
    surfaceId: str
    path: str
    contents: list[DataEntryDict]
class DeleteSurfaceDict(TypedDict):
    """Serialized deleteSurface payload (surfaceId is required, hence no total=False)."""
    surfaceId: str
class A2UIMessageDict(TypedDict, total=False):
    """Serialized A2UI v0.8 server-to-client message with exactly one key set."""
    # The exactly-one-of constraint is enforced by validation, not by the type.
    beginRendering: BeginRenderingDict
    surfaceUpdate: SurfaceUpdateDict
    dataModelUpdate: DataModelUpdateDict
    deleteSurface: DeleteSurfaceDict
class ThemeDict(TypedDict, total=False):
    """Serialized v0.9 theme."""
    primaryColor: str
    iconUrl: str
    agentDisplayName: str
class CreateSurfaceDict(TypedDict, total=False):
    """Serialized createSurface payload (v0.9 replacement for beginRendering)."""
    surfaceId: str
    catalogId: str
    theme: ThemeDict
    sendDataModel: bool
class UpdateComponentsDict(TypedDict, total=False):
    """Serialized updateComponents payload."""
    surfaceId: str
    # v0.9 components are flat dicts ("component" maps to a type-name string).
    components: list[dict[str, Any]]
class UpdateDataModelDict(TypedDict, total=False):
    """Serialized updateDataModel payload."""
    surfaceId: str
    path: str
    value: Any
class DeleteSurfaceV09Dict(TypedDict):
    """Serialized v0.9 deleteSurface payload."""
    surfaceId: str
class A2UIMessageV09Dict(TypedDict, total=False):
    """Serialized A2UI v0.9 server-to-client message with version and exactly one key set."""
    # Distinguished from v0.8 messages by the mandatory version marker.
    version: Literal["v0.9"]
    createSurface: CreateSurfaceDict
    updateComponents: UpdateComponentsDict
    updateDataModel: UpdateDataModelDict
    deleteSurface: DeleteSurfaceV09Dict
A2UIAnyMessageDict = A2UIMessageDict | A2UIMessageV09Dict
def is_v09_message(msg: A2UIAnyMessageDict) -> TypeIs[A2UIMessageV09Dict]:
    """Narrow a message dict to the v0.9 variant (identified by its version tag)."""
    version_tag = msg.get("version")
    return version_tag == "v0.9"
def is_v08_message(msg: A2UIAnyMessageDict) -> TypeIs[A2UIMessageDict]:
    """Narrow a message dict to the v0.8 variant (no version key on the wire)."""
    has_version = "version" in msg
    return not has_version
@dataclass
class A2UIConversationState:
    """Tracks active A2UI surfaces and data models across a conversation."""
    # Keyed by surfaceId; each value is the latest raw payload seen for that surface.
    active_surfaces: dict[str, dict[str, Any]] = Field(default_factory=dict)
    # Keyed by surfaceId; accumulated data-model entries/updates in arrival order.
    data_models: dict[str, list[dict[str, Any]]] = Field(default_factory=dict)
    # Most recent batch of validated A2UI messages extracted from an agent response.
    last_a2ui_messages: list[A2UIAnyMessageDict] = Field(default_factory=list)
    # Surfaces that have seen beginRendering/createSurface (i.e. are renderable).
    initialized_surfaces: set[str] = Field(default_factory=set)
    def is_ready(self) -> bool:
        """Return True when at least one surface has been initialized via beginRendering."""
        return bool(self.initialized_surfaces)
class A2UIClientExtension:
    """A2A client extension that adds A2UI support to agents.

    Implements the ``A2AExtension`` protocol to inject A2UI prompt
    instructions, track UI state across conversations, and validate
    A2UI messages in responses.

    Example::

        A2AClientConfig(
            endpoint="...",
            extensions=["https://a2ui.org/a2a-extension/a2ui/v0.8"],
            client_extensions=[A2UIClientExtension()],
        )
    """
    def __init__(
        self,
        catalog_id: str | None = None,
        allowed_components: list[str] | None = None,
        version: str = "v0.8",
    ) -> None:
        """Initialize the A2UI client extension.

        Args:
            catalog_id: Catalog identifier to use for prompt generation.
            allowed_components: Subset of component names to expose to the agent.
            version: Protocol version, ``"v0.8"`` or ``"v0.9"``.
        """
        self._catalog_id = catalog_id
        self._allowed_components = allowed_components
        self._version = version
    def inject_tools(self, agent: Agent) -> None:
        """No-op — A2UI uses prompt augmentation rather than tool injection."""
    def extract_state_from_history(
        self, conversation_history: Sequence[Message]
    ) -> A2UIConversationState | None:
        """Scan conversation history for A2UI DataParts and track surface state.

        When ``catalog_id`` is set, only surfaces matching that catalog are tracked.
        Returns ``None`` when the history contains no A2UI surface or data state.
        """
        state = A2UIConversationState()
        for message in conversation_history:
            for part in message.parts:
                root = part.root
                # Only DataParts tagged with the A2UI MIME type are relevant.
                if root.kind != "data":
                    continue
                metadata = root.metadata or {}
                mime_type = metadata.get("mimeType", "")
                if mime_type != A2UI_MIME_TYPE:
                    continue
                data = root.data
                if not isinstance(data, dict):
                    continue
                surface_id = _get_surface_id(data)
                if not surface_id:
                    continue
                # Catalog filtering applies only to surface-creating payloads
                # (beginRendering in v0.8, createSurface in v0.9).
                if self._catalog_id and "beginRendering" in data:
                    catalog_id = data["beginRendering"].get("catalogId")
                    if catalog_id and catalog_id != self._catalog_id:
                        continue
                if self._catalog_id and "createSurface" in data:
                    catalog_id = data["createSurface"].get("catalogId")
                    if catalog_id and catalog_id != self._catalog_id:
                        continue
                # Branch order matters: delete wins over create/update when a
                # payload (unexpectedly) carries more than one message key.
                if "deleteSurface" in data:
                    state.active_surfaces.pop(surface_id, None)
                    state.data_models.pop(surface_id, None)
                    state.initialized_surfaces.discard(surface_id)
                elif "beginRendering" in data:
                    state.initialized_surfaces.add(surface_id)
                    state.active_surfaces[surface_id] = data["beginRendering"]
                elif "createSurface" in data:
                    state.initialized_surfaces.add(surface_id)
                    state.active_surfaces[surface_id] = data["createSurface"]
                elif "surfaceUpdate" in data:
                    # Updates before initialization are tracked anyway, but logged.
                    if surface_id not in state.initialized_surfaces:
                        logger.warning(
                            "surfaceUpdate for uninitialized surface %s",
                            surface_id,
                        )
                    state.active_surfaces[surface_id] = data["surfaceUpdate"]
                elif "updateComponents" in data:
                    if surface_id not in state.initialized_surfaces:
                        logger.warning(
                            "updateComponents for uninitialized surface %s",
                            surface_id,
                        )
                    state.active_surfaces[surface_id] = data["updateComponents"]
                elif "dataModelUpdate" in data:
                    contents = data["dataModelUpdate"].get("contents", [])
                    state.data_models.setdefault(surface_id, []).extend(contents)
                elif "updateDataModel" in data:
                    update = data["updateDataModel"]
                    state.data_models.setdefault(surface_id, []).append(update)
        if not state.active_surfaces and not state.data_models:
            return None
        return state
    def augment_prompt(
        self,
        base_prompt: str,
        _conversation_state: A2UIConversationState | None,
    ) -> str:
        """Append A2UI system prompt instructions to the base prompt."""
        if self._version == "v0.9":
            a2ui_prompt = build_a2ui_v09_system_prompt(
                catalog_id=self._catalog_id,
                allowed_components=self._allowed_components,
            )
        else:
            a2ui_prompt = build_a2ui_system_prompt(
                catalog_id=self._catalog_id,
                allowed_components=self._allowed_components,
            )
        return f"{base_prompt}\n\n{a2ui_prompt}"
    def process_response(
        self,
        agent_response: Any,
        conversation_state: A2UIConversationState | None,
    ) -> Any:
        """Extract and validate A2UI JSON from agent output.

        When ``allowed_components`` is set, components not in the allowlist are
        logged and stripped from surface updates. Stores extracted A2UI messages
        on the conversation state and returns the original response unchanged.
        """
        text = (
            agent_response if isinstance(agent_response, str) else str(agent_response)
        )
        results: list[A2UIAnyMessageDict]
        if self._version == "v0.9":
            results = list(_extract_and_validate_v09(text))
            if self._allowed_components:
                allowed = set(self._allowed_components)
                results = [
                    _filter_components_v09(m, allowed)
                    for m in results
                    if is_v09_message(m)
                ]
        else:
            results = list(_extract_and_validate(text))
            if self._allowed_components:
                allowed = set(self._allowed_components)
                results = [
                    _filter_components(msg, allowed)
                    for msg in results
                    if is_v08_message(msg)
                ]
        # Only overwrite state when something was actually extracted.
        if results and conversation_state is not None:
            conversation_state.last_a2ui_messages = results
        return agent_response
    def prepare_message_metadata(
        self,
        _conversation_state: A2UIConversationState | None,
    ) -> dict[str, Any]:
        """Inject a2uiClientCapabilities into outbound A2A message metadata.

        Per the A2UI extension spec, clients must declare supported catalog
        IDs in every outbound message's metadata. v0.9 nests capabilities
        under a ``"v0.9"`` key per ``client_capabilities.json``.
        """
        if self._version == "v0.9":
            default_catalog = A2UI_V09_BASIC_CATALOG_ID
            catalog_ids = [default_catalog]
            # The default catalog is always advertised; a custom one is appended.
            if self._catalog_id and self._catalog_id != default_catalog:
                catalog_ids.append(self._catalog_id)
            return {
                "a2uiClientCapabilities": {
                    "v0.9": {
                        "supportedCatalogIds": catalog_ids,
                    },
                },
            }
        catalog_ids = [A2UI_STANDARD_CATALOG_ID]
        if self._catalog_id and self._catalog_id != A2UI_STANDARD_CATALOG_ID:
            catalog_ids.append(self._catalog_id)
        return {
            "a2uiClientCapabilities": {
                "supportedCatalogIds": catalog_ids,
            },
        }
_ALL_SURFACE_ID_KEYS = (
"beginRendering",
"surfaceUpdate",
"dataModelUpdate",
"deleteSurface",
"createSurface",
"updateComponents",
"updateDataModel",
)
def _get_surface_id(data: dict[str, Any]) -> str | None:
"""Extract surfaceId from any A2UI v0.8 or v0.9 message type."""
for key in _ALL_SURFACE_ID_KEYS:
inner = data.get(key)
if isinstance(inner, dict):
sid = inner.get("surfaceId")
if isinstance(sid, str):
return sid
return None
def _filter_components(msg: A2UIMessageDict, allowed: set[str]) -> A2UIMessageDict:
"""Strip components whose type is not in *allowed* from a surfaceUpdate."""
surface_update = msg.get("surfaceUpdate")
if not isinstance(surface_update, dict):
return msg
components = surface_update.get("components")
if not isinstance(components, list):
return msg
filtered = []
for entry in components:
component = entry.get("component", {})
component_types = set(component.keys())
disallowed = component_types - allowed
if disallowed:
logger.debug(
"Stripping disallowed component type(s) %s from surface update",
disallowed,
)
continue
filtered.append(entry)
if len(filtered) == len(components):
return msg
return {**msg, "surfaceUpdate": {**surface_update, "components": filtered}}
def _filter_components_v09(
msg: A2UIMessageV09Dict, allowed: set[str]
) -> A2UIMessageV09Dict:
"""Strip v0.9 components whose type is not in *allowed* from updateComponents.
v0.9 components use a flat structure where ``component`` is a type-name string.
"""
update = msg.get("updateComponents")
if not isinstance(update, dict):
return msg
components = update.get("components")
if not isinstance(components, list):
return msg
filtered = []
for entry in components:
comp_type = entry.get("component") if isinstance(entry, dict) else None
if isinstance(comp_type, str) and comp_type not in allowed:
logger.debug("Stripping disallowed v0.9 component type %s", comp_type)
continue
filtered.append(entry)
if len(filtered) == len(components):
return msg
return {**msg, "updateComponents": {**update, "components": filtered}}
def _extract_and_validate(text: str) -> list[A2UIMessageDict]:
    """Extract A2UI v0.8 JSON objects from *text*, keeping only those that validate."""
    validated: list[A2UIMessageDict] = []
    for candidate in extract_a2ui_json_objects(text):
        dumped = _try_validate(candidate)
        if dumped is not None:
            validated.append(dumped)
    return validated
def _try_validate(candidate: dict[str, Any]) -> A2UIMessageDict | None:
    """Validate a single v0.8 A2UI candidate; return its serialized form or None."""
    try:
        validated = validate_a2ui_message(candidate)
    except A2UIValidationError:
        # Invalid candidates are expected in free-form LLM output; skip quietly.
        logger.debug(
            "Skipping invalid A2UI candidate in agent output",
            exc_info=True,
        )
        return None
    serialized = validated.model_dump(by_alias=True, exclude_none=True)
    return cast(A2UIMessageDict, serialized)
def _extract_and_validate_v09(text: str) -> list[A2UIMessageV09Dict]:
    """Extract A2UI v0.9 JSON objects from *text*, keeping only those that validate."""
    validated: list[A2UIMessageV09Dict] = []
    for candidate in extract_a2ui_v09_json_objects(text):
        dumped = _try_validate_v09(candidate)
        if dumped is not None:
            validated.append(dumped)
    return validated
def _try_validate_v09(candidate: dict[str, Any]) -> A2UIMessageV09Dict | None:
    """Validate a single v0.9 A2UI candidate; return its serialized form or None."""
    try:
        validated = validate_a2ui_message_v09(candidate)
    except A2UIValidationError:
        # Invalid candidates are expected in free-form LLM output; skip quietly.
        logger.debug(
            "Skipping invalid A2UI v0.9 candidate in agent output",
            exc_info=True,
        )
        return None
    serialized = validated.model_dump(by_alias=True, exclude_none=True)
    return cast(A2UIMessageV09Dict, serialized)

View File

@@ -1,277 +0,0 @@
"""Pydantic models for A2UI server-to-client messages and client-to-server events."""
from __future__ import annotations
import json
import re
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, model_validator
class BoundValue(BaseModel):
    """A value that can be a literal or a data-model path reference."""
    # Exactly one field is expected in practice, but the model itself does not
    # enforce mutual exclusion — TODO confirm against the A2UI schema.
    literal_string: str | None = Field(
        default=None, alias="literalString", description="Literal string value."
    )
    literal_number: float | None = Field(
        default=None, alias="literalNumber", description="Literal numeric value."
    )
    literal_boolean: bool | None = Field(
        default=None, alias="literalBoolean", description="Literal boolean value."
    )
    literal_array: list[str] | None = Field(
        default=None, alias="literalArray", description="Literal array of strings."
    )
    path: str | None = Field(default=None, description="Data-model path reference.")
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class MapEntry(BaseModel):
    """A single entry in a valueMap adjacency list, supporting recursive nesting."""
    key: str = Field(description="Entry key.")
    value_string: str | None = Field(
        default=None, alias="valueString", description="String value."
    )
    value_number: float | None = Field(
        default=None, alias="valueNumber", description="Numeric value."
    )
    value_boolean: bool | None = Field(
        default=None, alias="valueBoolean", description="Boolean value."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class DataEntry(BaseModel):
    """A data model entry with a key and exactly one typed value."""
    # The one-typed-value rule is not validated here; callers rely on convention.
    key: str = Field(description="Entry key.")
    value_string: str | None = Field(
        default=None, alias="valueString", description="String value."
    )
    value_number: float | None = Field(
        default=None, alias="valueNumber", description="Numeric value."
    )
    value_boolean: bool | None = Field(
        default=None, alias="valueBoolean", description="Boolean value."
    )
    value_map: list[MapEntry] | None = Field(
        default=None, alias="valueMap", description="Nested map entries."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
_HEX_COLOR_PATTERN: re.Pattern[str] = re.compile(r"^#[0-9a-fA-F]{6}$")
class Styles(BaseModel):
    """Surface styling information."""
    font: str | None = Field(default=None, description="Font family name.")
    # Validated against _HEX_COLOR_PATTERN ("#RRGGBB").
    primary_color: str | None = Field(
        default=None,
        alias="primaryColor",
        pattern=_HEX_COLOR_PATTERN.pattern,
        description="Primary color as a hex string.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class ComponentEntry(BaseModel):
    """A single component in a UI widget tree.

    The ``component`` dict must contain exactly one key — the component type
    name (e.g. ``"Text"``, ``"Button"``) — whose value holds the component
    properties. Component internals are left as ``dict[str, Any]`` because
    they are catalog-dependent; use the typed helpers in ``catalog.py`` for
    the standard catalog.
    """
    id: str = Field(description="Unique component identifier.")
    weight: float | None = Field(
        default=None, description="Flex weight for layout distribution."
    )
    component: dict[str, Any] = Field(
        description="Component type name mapped to its properties."
    )
    model_config = ConfigDict(extra="forbid")
class BeginRendering(BaseModel):
    """Signals the client to begin rendering a surface."""
    surface_id: str = Field(alias="surfaceId", description="Unique surface identifier.")
    # ``root`` references a component by ID, sent separately via surfaceUpdate.
    root: str = Field(description="Component ID of the root element.")
    catalog_id: str | None = Field(
        default=None,
        alias="catalogId",
        description="Catalog identifier for the surface.",
    )
    styles: Styles | None = Field(
        default=None, description="Surface styling overrides."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class SurfaceUpdate(BaseModel):
    """Updates a surface with a new set of components."""
    surface_id: str = Field(alias="surfaceId", description="Target surface identifier.")
    # An empty component list is rejected (min_length=1).
    components: list[ComponentEntry] = Field(
        min_length=1, description="Components to render on the surface."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class DataModelUpdate(BaseModel):
    """Updates the data model for a surface."""
    surface_id: str = Field(alias="surfaceId", description="Target surface identifier.")
    path: str | None = Field(
        default=None, description="Data-model path prefix for the update."
    )
    contents: list[DataEntry] = Field(
        description="Data entries to merge into the model."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class DeleteSurface(BaseModel):
    """Signals the client to delete a surface."""
    surface_id: str = Field(
        alias="surfaceId", description="Surface identifier to delete."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class A2UIMessage(BaseModel):
    """Union wrapper for the four server-to-client A2UI message types.

    Exactly one of the fields must be set.
    """
    begin_rendering: BeginRendering | None = Field(
        default=None,
        alias="beginRendering",
        description="Begin rendering a new surface.",
    )
    surface_update: SurfaceUpdate | None = Field(
        default=None,
        alias="surfaceUpdate",
        description="Update components on a surface.",
    )
    data_model_update: DataModelUpdate | None = Field(
        default=None,
        alias="dataModelUpdate",
        description="Update the surface data model.",
    )
    delete_surface: DeleteSurface | None = Field(
        default=None, alias="deleteSurface", description="Delete an existing surface."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
    @model_validator(mode="after")
    def _check_exactly_one(self) -> A2UIMessage:
        """Enforce the spec's exactly-one-of constraint."""
        # Runs after field validation, so each member is either None or valid.
        fields = [
            self.begin_rendering,
            self.surface_update,
            self.data_model_update,
            self.delete_surface,
        ]
        count = sum(f is not None for f in fields)
        if count != 1:
            raise ValueError(f"Exactly one A2UI message type must be set, got {count}")
        return self
class UserAction(BaseModel):
    """Reports a user-initiated action from a component."""
    name: str = Field(description="Action name.")
    surface_id: str = Field(alias="surfaceId", description="Source surface identifier.")
    source_component_id: str = Field(
        alias="sourceComponentId", description="Component that triggered the action."
    )
    timestamp: str = Field(description="ISO 8601 timestamp of the action.")
    context: dict[str, Any] = Field(description="Action context payload.")
    # No extra="forbid" here: client events may carry additional fields.
    model_config = ConfigDict(populate_by_name=True)
class ClientError(BaseModel):
    """Reports a client-side error; the payload shape is deliberately open-ended."""
    model_config = ConfigDict(extra="allow")
class A2UIEvent(BaseModel):
    """Union wrapper for client-to-server events (exactly one member set)."""
    user_action: UserAction | None = Field(
        default=None, alias="userAction", description="User-initiated action event."
    )
    error: ClientError | None = Field(
        default=None, description="Client-side error report."
    )
    model_config = ConfigDict(populate_by_name=True)
    @model_validator(mode="after")
    def _check_exactly_one(self) -> A2UIEvent:
        """Enforce the spec's exactly-one-of constraint."""
        fields = [self.user_action, self.error]
        count = sum(f is not None for f in fields)
        if count != 1:
            raise ValueError(f"Exactly one A2UI event type must be set, got {count}")
        return self
class A2UIResponse(BaseModel):
    """Typed wrapper for responses containing A2UI messages."""
    text: str = Field(description="Raw text content of the response.")
    a2ui_parts: list[dict[str, Any]] = Field(
        default_factory=list, description="A2UI DataParts extracted from the response."
    )
    a2ui_messages: list[dict[str, Any]] = Field(
        default_factory=list, description="Validated A2UI message dicts."
    )
# Top-level keys whose presence marks a JSON object as an A2UI v0.8 message.
_A2UI_KEYS = {"beginRendering", "surfaceUpdate", "dataModelUpdate", "deleteSurface"}


def extract_a2ui_json_objects(text: str) -> list[dict[str, Any]]:
    """Extract JSON objects containing A2UI keys from *text*, in order of appearance.

    Uses ``json.JSONDecoder.raw_decode`` for robust parsing that correctly
    handles braces inside string literals. Any successfully decoded object is
    skipped over wholesale, so objects nested inside non-A2UI wrappers are not
    extracted separately.
    """
    decoder = json.JSONDecoder()
    found: list[dict[str, Any]] = []
    pos = text.find("{")
    while pos != -1:
        try:
            candidate, after = decoder.raw_decode(text, pos)
        except json.JSONDecodeError:
            # Not valid JSON here; resume scanning at the next opening brace.
            pos = text.find("{", pos + 1)
            continue
        if isinstance(candidate, dict) and _A2UI_KEYS.intersection(candidate):
            found.append(candidate)
        pos = text.find("{", after)
    return found

View File

@@ -1,150 +0,0 @@
"""System prompt generation for A2UI-capable agents."""
from __future__ import annotations
import json
from crewai.a2a.extensions.a2ui.catalog import STANDARD_CATALOG_COMPONENTS
from crewai.a2a.extensions.a2ui.schema import load_schema
from crewai.a2a.extensions.a2ui.server_extension import (
A2UI_EXTENSION_URI,
A2UI_V09_BASIC_CATALOG_ID,
)
from crewai.a2a.extensions.a2ui.v0_9 import (
BASIC_CATALOG_COMPONENTS as V09_CATALOG_COMPONENTS,
BASIC_CATALOG_FUNCTIONS,
)
def build_a2ui_system_prompt(
    catalog_id: str | None = None,
    allowed_components: list[str] | None = None,
) -> str:
    """Build a v0.8 system prompt fragment instructing the LLM to produce A2UI output.

    Args:
        catalog_id: Catalog identifier to reference. Defaults to the
            standard catalog version derived from ``A2UI_EXTENSION_URI``.
        allowed_components: Subset of component names to expose. When
            ``None``, all standard catalog components are available.

    Returns:
        A system prompt string to append to the agent's instructions.
    """
    # sorted() keeps the component list deterministic for stable prompts/caching.
    components = sorted(
        allowed_components
        if allowed_components is not None
        else STANDARD_CATALOG_COMPONENTS
    )
    catalog_label = catalog_id or f"standard ({A2UI_EXTENSION_URI.rsplit('/', 1)[-1]})"
    resolved_schema = load_schema(
        "server_to_client_with_standard_catalog", version="v0.8"
    )
    schema_json = json.dumps(resolved_schema, indent=2)
    # The f-string below IS the runtime prompt; its column-0 layout is intentional.
    return f"""\
<A2UI_INSTRUCTIONS>
You can generate rich, declarative UI by emitting A2UI JSON messages.
CATALOG: {catalog_label}
AVAILABLE COMPONENTS: {", ".join(components)}
MESSAGE TYPES (emit exactly ONE per message):
- beginRendering: Initialize a new surface with a root component and optional styles.
- surfaceUpdate: Send/update components for a surface. Each component has a unique id \
and a "component" wrapper containing exactly one component-type key.
- dataModelUpdate: Update the data model for a surface. Data entries have a key and \
one typed value (valueString, valueNumber, valueBoolean, valueMap).
- deleteSurface: Remove a surface.
DATA BINDING:
- Use {{"literalString": "..."}} for inline string values.
- Use {{"literalNumber": ...}} for inline numeric values.
- Use {{"literalBoolean": ...}} for inline boolean values.
- Use {{"literalArray": ["...", "..."]}} for inline array values.
- Use {{"path": "/data/model/path"}} to bind to data model values.
ACTIONS:
- Interactive components (Button, etc.) have an "action" with a "name" and optional \
"context" array of key/value pairs.
- Values in action context can use data binding (path or literal).
OUTPUT FORMAT:
Emit each A2UI message as a valid JSON object. When generating UI, produce a \
beginRendering message first, then surfaceUpdate messages with components, and \
optionally dataModelUpdate messages to populate data-bound values.
SCHEMA:
{schema_json}
</A2UI_INSTRUCTIONS>"""
def build_a2ui_v09_system_prompt(
    catalog_id: str | None = None,
    allowed_components: list[str] | None = None,
) -> str:
    """Build a v0.9 system prompt fragment instructing the LLM to produce A2UI output.

    Args:
        catalog_id: Catalog identifier to reference. Defaults to the
            v0.9 basic catalog.
        allowed_components: Subset of component names to expose. When
            ``None``, all basic catalog components are available.

    Returns:
        A system prompt string to append to the agent's instructions.
    """
    # sorted() keeps component/function lists deterministic for stable prompts.
    components = sorted(
        allowed_components if allowed_components is not None else V09_CATALOG_COMPONENTS
    )
    catalog_label = catalog_id or A2UI_V09_BASIC_CATALOG_ID
    functions = sorted(BASIC_CATALOG_FUNCTIONS)
    envelope_schema = load_schema("server_to_client", version="v0.9")
    schema_json = json.dumps(envelope_schema, indent=2)
    # The f-string below IS the runtime prompt; its column-0 layout is intentional.
    return f"""\
<A2UI_INSTRUCTIONS>
You can generate rich, declarative UI by emitting A2UI v0.9 JSON messages.
Every message MUST include "version": "v0.9".
CATALOG: {catalog_label}
AVAILABLE COMPONENTS: {", ".join(components)}
AVAILABLE FUNCTIONS: {", ".join(functions)}
MESSAGE TYPES (emit exactly ONE per message alongside "version": "v0.9"):
- createSurface: Create a new surface. Requires surfaceId and catalogId. \
Optionally includes theme (primaryColor, iconUrl, agentDisplayName) and \
sendDataModel (boolean).
- updateComponents: Send/update components for a surface. Each component is a flat \
object with "id", "component" (type name string), and type-specific properties at the \
top level. One component MUST have id "root".
- updateDataModel: Update the data model. Uses "path" (JSON Pointer) and "value" \
(any JSON type). Omit "value" to delete the key at path.
- deleteSurface: Remove a surface by surfaceId.
COMPONENT FORMAT (flat, NOT nested):
{{"id": "myText", "component": "Text", "text": "Hello world", "variant": "h1"}}
{{"id": "myBtn", "component": "Button", "child": "myText", "action": {{"event": \
{{"name": "click"}}}}}}
DATA BINDING:
- Use plain values for literals: "text": "Hello" or "value": 42
- Use {{"path": "/data/model/path"}} to bind to data model values.
- Use {{"call": "functionName", "args": {{...}}}} for client-side functions.
ACTIONS:
- Server event: {{"event": {{"name": "actionName", "context": {{"key": "value"}}}}}}
- Local function: {{"functionCall": {{"call": "openUrl", "args": {{"url": "..."}}}}}}
OUTPUT FORMAT:
Emit each A2UI message as a valid JSON object. When generating UI, first emit a \
createSurface message with the catalogId, then updateComponents messages with \
components (one must have id "root"), and optionally updateDataModel messages.
ENVELOPE SCHEMA:
{schema_json}
</A2UI_INSTRUCTIONS>"""

View File

@@ -1,74 +0,0 @@
"""Schema loading utilities for vendored A2UI JSON schemas."""
from __future__ import annotations
import json
from pathlib import Path
from typing import Any
# Vendored schema directories, one per protocol version, shipped with the package.
_V08_DIR = Path(__file__).parent / "v0_8"
_V09_DIR = Path(__file__).parent / "v0_9"
# Process-wide cache keyed by "<version>/<name>"; populated lazily by load_schema.
_SCHEMA_CACHE: dict[str, dict[str, Any]] = {}
# Schema files vendored for the v0.8 protocol (names without the .json suffix).
SCHEMA_NAMES: frozenset[str] = frozenset(
    {
        "server_to_client",
        "client_to_server",
        "standard_catalog_definition",
        "server_to_client_with_standard_catalog",
    }
)
# Schema files vendored for the v0.9 protocol.
V09_SCHEMA_NAMES: frozenset[str] = frozenset(
    {
        "server_to_client",
        "client_to_server",
        "common_types",
        "basic_catalog",
        "client_capabilities",
        "server_capabilities",
        "client_data_model",
    }
)
def load_schema(name: str, *, version: str = "v0.8") -> dict[str, Any]:
    """Load a vendored A2UI JSON schema by name and version.

    Results are cached per process, so each schema file is read at most once.

    Args:
        name: Schema name without extension, e.g. ``"server_to_client"``.
        version: Protocol version, ``"v0.8"`` or ``"v0.9"``.

    Returns:
        Parsed JSON schema dict.

    Raises:
        ValueError: If the schema name or version is not recognized.
        FileNotFoundError: If the schema file is missing from the package.
    """
    if version == "v0.8":
        valid_names = SCHEMA_NAMES
        schema_dir = _V08_DIR
    elif version == "v0.9":
        valid_names = V09_SCHEMA_NAMES
        schema_dir = _V09_DIR
    else:
        raise ValueError(f"Unknown version {version!r}. Available: v0.8, v0.9")
    if name not in valid_names:
        raise ValueError(
            f"Unknown schema {name!r} for {version}. Available: {sorted(valid_names)}"
        )
    cache_key = f"{version}/{name}"
    if cache_key in _SCHEMA_CACHE:
        return _SCHEMA_CACHE[cache_key]
    path = schema_dir / f"{name}.json"
    # JSON schema files are UTF-8; pin the encoding so loading does not depend
    # on the platform's locale default (e.g. cp1252 on Windows).
    with path.open(encoding="utf-8") as f:
        schema: dict[str, Any] = json.load(f)
    _SCHEMA_CACHE[cache_key] = schema
    return schema

View File

@@ -1,53 +0,0 @@
{
"title": "A2UI (Agent to UI) Client-to-Server Event Schema",
"description": "Describes a JSON payload for a client-to-server event message.",
"type": "object",
"minProperties": 1,
"maxProperties": 1,
"properties": {
"userAction": {
"type": "object",
"description": "Reports a user-initiated action from a component.",
"properties": {
"name": {
"type": "string",
"description": "The name of the action, taken from the component's action.name property."
},
"surfaceId": {
"type": "string",
"description": "The id of the surface where the event originated."
},
"sourceComponentId": {
"type": "string",
"description": "The id of the component that triggered the event."
},
"timestamp": {
"type": "string",
"format": "date-time",
"description": "An ISO 8601 timestamp of when the event occurred."
},
"context": {
"type": "object",
"description": "A JSON object containing the key-value pairs from the component's action.context, after resolving all data bindings.",
"additionalProperties": true
}
},
"required": [
"name",
"surfaceId",
"sourceComponentId",
"timestamp",
"context"
]
},
"error": {
"type": "object",
"description": "Reports a client-side error. The content is flexible.",
"additionalProperties": true
}
},
"oneOf": [
{ "required": ["userAction"] },
{ "required": ["error"] }
]
}

View File

@@ -1,148 +0,0 @@
{
"title": "A2UI Message Schema",
"description": "Describes a JSON payload for an A2UI (Agent to UI) message, which is used to dynamically construct and update user interfaces. A message MUST contain exactly ONE of the action properties: 'beginRendering', 'surfaceUpdate', 'dataModelUpdate', or 'deleteSurface'.",
"type": "object",
"additionalProperties": false,
"properties": {
"beginRendering": {
"type": "object",
"description": "Signals the client to begin rendering a surface with a root component and specific styles.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be rendered."
},
"catalogId": {
"type": "string",
"description": "The identifier of the component catalog to use for this surface. If omitted, the client MUST default to the standard catalog for this A2UI version (https://a2ui.org/specification/v0_8/standard_catalog_definition.json)."
},
"root": {
"type": "string",
"description": "The ID of the root component to render."
},
"styles": {
"type": "object",
"description": "Styling information for the UI.",
"additionalProperties": true
}
},
"required": ["root", "surfaceId"]
},
"surfaceUpdate": {
"type": "object",
"description": "Updates a surface with a new set of components.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
          "description": "The unique identifier for the UI surface to be updated. If you are adding a new surface this *must* be a new, unique identifier that has never been used for any existing surfaces shown."
},
"components": {
"type": "array",
"description": "A list containing all UI components for the surface.",
"minItems": 1,
"items": {
"type": "object",
"description": "Represents a *single* component in a UI widget tree. This component could be one of many supported types.",
"additionalProperties": false,
"properties": {
"id": {
"type": "string",
"description": "The unique identifier for this component."
},
"weight": {
"type": "number",
"description": "The relative weight of this component within a Row or Column. This corresponds to the CSS 'flex-grow' property. Note: this may ONLY be set when the component is a direct descendant of a Row or Column."
},
"component": {
"type": "object",
"description": "A wrapper object that MUST contain exactly one key, which is the name of the component type. The value is an object containing the properties for that specific component.",
"additionalProperties": true
}
},
"required": ["id", "component"]
}
}
},
"required": ["surfaceId", "components"]
},
"dataModelUpdate": {
"type": "object",
"description": "Updates the data model for a surface.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface this data model update applies to."
},
"path": {
"type": "string",
"description": "An optional path to a location within the data model (e.g., '/user/name'). If omitted, or set to '/', the entire data model will be replaced."
},
"contents": {
"type": "array",
"description": "An array of data entries. Each entry must contain a 'key' and exactly one corresponding typed 'value*' property.",
"items": {
"type": "object",
"description": "A single data entry. Exactly one 'value*' property should be provided alongside the key.",
"additionalProperties": false,
"properties": {
"key": {
"type": "string",
"description": "The key for this data entry."
},
"valueString": {
"type": "string"
},
"valueNumber": {
"type": "number"
},
"valueBoolean": {
"type": "boolean"
},
"valueMap": {
"description": "Represents a map as an adjacency list.",
"type": "array",
"items": {
"type": "object",
"description": "One entry in the map. Exactly one 'value*' property should be provided alongside the key.",
"additionalProperties": false,
"properties": {
"key": {
"type": "string"
},
"valueString": {
"type": "string"
},
"valueNumber": {
"type": "number"
},
"valueBoolean": {
"type": "boolean"
}
},
"required": ["key"]
}
}
},
"required": ["key"]
}
}
},
"required": ["contents", "surfaceId"]
},
"deleteSurface": {
"type": "object",
"description": "Signals the client to delete the surface identified by 'surfaceId'.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be deleted."
}
},
"required": ["surfaceId"]
}
}
}

View File

@@ -1,832 +0,0 @@
{
"title": "A2UI Message Schema",
"description": "Describes a JSON payload for an A2UI (Agent to UI) message, which is used to dynamically construct and update user interfaces. A message MUST contain exactly ONE of the action properties: 'beginRendering', 'surfaceUpdate', 'dataModelUpdate', or 'deleteSurface'.",
"type": "object",
"additionalProperties": false,
"properties": {
"beginRendering": {
"type": "object",
"description": "Signals the client to begin rendering a surface with a root component and specific styles.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be rendered."
},
"root": {
"type": "string",
"description": "The ID of the root component to render."
},
"styles": {
"type": "object",
"description": "Styling information for the UI.",
"additionalProperties": false,
"properties": {
"font": {
"type": "string",
"description": "The primary font for the UI."
},
"primaryColor": {
"type": "string",
"description": "The primary UI color as a hexadecimal code (e.g., '#00BFFF').",
"pattern": "^#[0-9a-fA-F]{6}$"
}
}
}
},
"required": ["root", "surfaceId"]
},
"surfaceUpdate": {
"type": "object",
"description": "Updates a surface with a new set of components.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
          "description": "The unique identifier for the UI surface to be updated. If you are adding a new surface this *must* be a new, unique identifier that has never been used for any existing surfaces shown."
},
"components": {
"type": "array",
"description": "A list containing all UI components for the surface.",
"minItems": 1,
"items": {
"type": "object",
"description": "Represents a *single* component in a UI widget tree. This component could be one of many supported types.",
"additionalProperties": false,
"properties": {
"id": {
"type": "string",
"description": "The unique identifier for this component."
},
"weight": {
"type": "number",
"description": "The relative weight of this component within a Row or Column. This corresponds to the CSS 'flex-grow' property. Note: this may ONLY be set when the component is a direct descendant of a Row or Column."
},
"component": {
"type": "object",
"description": "A wrapper object that MUST contain exactly one key, which is the name of the component type (e.g., 'Heading'). The value is an object containing the properties for that specific component.",
"additionalProperties": false,
"properties": {
"Text": {
"type": "object",
"additionalProperties": false,
"properties": {
"text": {
"type": "object",
"description": "The text content to display. This can be a literal string or a reference to a value in the data model ('path', e.g., '/doc/title'). While simple Markdown formatting is supported (i.e. without HTML, images, or links), utilizing dedicated UI components is generally preferred for a richer and more structured presentation.",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"usageHint": {
"type": "string",
"description": "A hint for the base text style. One of:\n- `h1`: Largest heading.\n- `h2`: Second largest heading.\n- `h3`: Third largest heading.\n- `h4`: Fourth largest heading.\n- `h5`: Fifth largest heading.\n- `caption`: Small text for captions.\n- `body`: Standard body text.",
"enum": [
"h1",
"h2",
"h3",
"h4",
"h5",
"caption",
"body"
]
}
},
"required": ["text"]
},
"Image": {
"type": "object",
"additionalProperties": false,
"properties": {
"url": {
"type": "object",
"description": "The URL of the image to display. This can be a literal string ('literal') or a reference to a value in the data model ('path', e.g. '/thumbnail/url').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"fit": {
"type": "string",
"description": "Specifies how the image should be resized to fit its container. This corresponds to the CSS 'object-fit' property.",
"enum": [
"contain",
"cover",
"fill",
"none",
"scale-down"
]
},
"usageHint": {
"type": "string",
"description": "A hint for the image size and style. One of:\n- `icon`: Small square icon.\n- `avatar`: Circular avatar image.\n- `smallFeature`: Small feature image.\n- `mediumFeature`: Medium feature image.\n- `largeFeature`: Large feature image.\n- `header`: Full-width, full bleed, header image.",
"enum": [
"icon",
"avatar",
"smallFeature",
"mediumFeature",
"largeFeature",
"header"
]
}
},
"required": ["url"]
},
"Icon": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "object",
"description": "The name of the icon to display. This can be a literal string or a reference to a value in the data model ('path', e.g. '/form/submit').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string",
"enum": [
"accountCircle",
"add",
"arrowBack",
"arrowForward",
"attachFile",
"calendarToday",
"call",
"camera",
"check",
"close",
"delete",
"download",
"edit",
"event",
"error",
"favorite",
"favoriteOff",
"folder",
"help",
"home",
"info",
"locationOn",
"lock",
"lockOpen",
"mail",
"menu",
"moreVert",
"moreHoriz",
"notificationsOff",
"notifications",
"payment",
"person",
"phone",
"photo",
"print",
"refresh",
"search",
"send",
"settings",
"share",
"shoppingCart",
"star",
"starHalf",
"starOff",
"upload",
"visibility",
"visibilityOff",
"warning"
]
},
"path": {
"type": "string"
}
}
}
},
"required": ["name"]
},
"Video": {
"type": "object",
"additionalProperties": false,
"properties": {
"url": {
"type": "object",
"description": "The URL of the video to display. This can be a literal string or a reference to a value in the data model ('path', e.g. '/video/url').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
}
},
"required": ["url"]
},
"AudioPlayer": {
"type": "object",
"additionalProperties": false,
"properties": {
"url": {
"type": "object",
"description": "The URL of the audio to be played. This can be a literal string ('literal') or a reference to a value in the data model ('path', e.g. '/song/url').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"description": {
"type": "object",
"description": "A description of the audio, such as a title or summary. This can be a literal string or a reference to a value in the data model ('path', e.g. '/song/title').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
}
},
"required": ["url"]
},
"Row": {
"type": "object",
"additionalProperties": false,
"properties": {
"children": {
"type": "object",
"description": "Defines the children. Use 'explicitList' for a fixed set of children, or 'template' to generate children from a data list.",
"additionalProperties": false,
"properties": {
"explicitList": {
"type": "array",
"items": {
"type": "string"
}
},
"template": {
"type": "object",
"description": "A template for generating a dynamic list of children from a data model list. `componentId` is the component to use as a template, and `dataBinding` is the path to the map of components in the data model. Values in the map will define the list of children.",
"additionalProperties": false,
"properties": {
"componentId": {
"type": "string"
},
"dataBinding": {
"type": "string"
}
},
"required": ["componentId", "dataBinding"]
}
}
},
"distribution": {
"type": "string",
"description": "Defines the arrangement of children along the main axis (horizontally). This corresponds to the CSS 'justify-content' property.",
"enum": [
"center",
"end",
"spaceAround",
"spaceBetween",
"spaceEvenly",
"start"
]
},
"alignment": {
"type": "string",
"description": "Defines the alignment of children along the cross axis (vertically). This corresponds to the CSS 'align-items' property.",
"enum": ["start", "center", "end", "stretch"]
}
},
"required": ["children"]
},
"Column": {
"type": "object",
"additionalProperties": false,
"properties": {
"children": {
"type": "object",
"description": "Defines the children. Use 'explicitList' for a fixed set of children, or 'template' to generate children from a data list.",
"additionalProperties": false,
"properties": {
"explicitList": {
"type": "array",
"items": {
"type": "string"
}
},
"template": {
"type": "object",
"description": "A template for generating a dynamic list of children from a data model list. `componentId` is the component to use as a template, and `dataBinding` is the path to the map of components in the data model. Values in the map will define the list of children.",
"additionalProperties": false,
"properties": {
"componentId": {
"type": "string"
},
"dataBinding": {
"type": "string"
}
},
"required": ["componentId", "dataBinding"]
}
}
},
"distribution": {
"type": "string",
"description": "Defines the arrangement of children along the main axis (vertically). This corresponds to the CSS 'justify-content' property.",
"enum": [
"start",
"center",
"end",
"spaceBetween",
"spaceAround",
"spaceEvenly"
]
},
"alignment": {
"type": "string",
"description": "Defines the alignment of children along the cross axis (horizontally). This corresponds to the CSS 'align-items' property.",
"enum": ["center", "end", "start", "stretch"]
}
},
"required": ["children"]
},
"List": {
"type": "object",
"additionalProperties": false,
"properties": {
"children": {
"type": "object",
"description": "Defines the children. Use 'explicitList' for a fixed set of children, or 'template' to generate children from a data list.",
"additionalProperties": false,
"properties": {
"explicitList": {
"type": "array",
"items": {
"type": "string"
}
},
"template": {
"type": "object",
"description": "A template for generating a dynamic list of children from a data model list. `componentId` is the component to use as a template, and `dataBinding` is the path to the map of components in the data model. Values in the map will define the list of children.",
"additionalProperties": false,
"properties": {
"componentId": {
"type": "string"
},
"dataBinding": {
"type": "string"
}
},
"required": ["componentId", "dataBinding"]
}
}
},
"direction": {
"type": "string",
"description": "The direction in which the list items are laid out.",
"enum": ["vertical", "horizontal"]
},
"alignment": {
"type": "string",
"description": "Defines the alignment of children along the cross axis.",
"enum": ["start", "center", "end", "stretch"]
}
},
"required": ["children"]
},
"Card": {
"type": "object",
"additionalProperties": false,
"properties": {
"child": {
"type": "string",
"description": "The ID of the component to be rendered inside the card."
}
},
"required": ["child"]
},
"Tabs": {
"type": "object",
"additionalProperties": false,
"properties": {
"tabItems": {
"type": "array",
"description": "An array of objects, where each object defines a tab with a title and a child component.",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"title": {
"type": "object",
"description": "The tab title. Defines the value as either a literal value or a path to data model value (e.g. '/options/title').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"child": {
"type": "string"
}
},
"required": ["title", "child"]
}
}
},
"required": ["tabItems"]
},
"Divider": {
"type": "object",
"additionalProperties": false,
"properties": {
"axis": {
"type": "string",
"description": "The orientation of the divider.",
"enum": ["horizontal", "vertical"]
}
}
},
"Modal": {
"type": "object",
"additionalProperties": false,
"properties": {
"entryPointChild": {
"type": "string",
"description": "The ID of the component that opens the modal when interacted with (e.g., a button)."
},
"contentChild": {
"type": "string",
"description": "The ID of the component to be displayed inside the modal."
}
},
"required": ["entryPointChild", "contentChild"]
},
"Button": {
"type": "object",
"additionalProperties": false,
"properties": {
"child": {
"type": "string",
"description": "The ID of the component to display in the button, typically a Text component."
},
"primary": {
"type": "boolean",
"description": "Indicates if this button should be styled as the primary action."
},
"action": {
"type": "object",
"description": "The client-side action to be dispatched when the button is clicked. It includes the action's name and an optional context payload.",
"additionalProperties": false,
"properties": {
"name": {
"type": "string"
},
"context": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"key": {
"type": "string"
},
"value": {
"type": "object",
"description": "Defines the value to be included in the context as either a literal value or a path to a data model value (e.g. '/user/name').",
"additionalProperties": false,
"properties": {
"path": {
"type": "string"
},
"literalString": {
"type": "string"
},
"literalNumber": {
"type": "number"
},
"literalBoolean": {
"type": "boolean"
}
}
}
},
"required": ["key", "value"]
}
}
},
"required": ["name"]
}
},
"required": ["child", "action"]
},
"CheckBox": {
"type": "object",
"additionalProperties": false,
"properties": {
"label": {
"type": "object",
"description": "The text to display next to the checkbox. Defines the value as either a literal value or a path to data model ('path', e.g. '/option/label').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"value": {
"type": "object",
"description": "The current state of the checkbox (true for checked, false for unchecked). This can be a literal boolean ('literalBoolean') or a reference to a value in the data model ('path', e.g. '/filter/open').",
"additionalProperties": false,
"properties": {
"literalBoolean": {
"type": "boolean"
},
"path": {
"type": "string"
}
}
}
},
"required": ["label", "value"]
},
"TextField": {
"type": "object",
"additionalProperties": false,
"properties": {
"label": {
"type": "object",
                            "description": "The text label for the input field. This can be a literal string or a reference to a value in the data model ('path', e.g. '/user/name').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"text": {
"type": "object",
"description": "The value of the text field. This can be a literal string or a reference to a value in the data model ('path', e.g. '/user/name').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"textFieldType": {
"type": "string",
"description": "The type of input field to display.",
"enum": [
"date",
"longText",
"number",
"shortText",
"obscured"
]
},
"validationRegexp": {
"type": "string",
"description": "A regular expression used for client-side validation of the input."
}
},
"required": ["label"]
},
"DateTimeInput": {
"type": "object",
"additionalProperties": false,
"properties": {
"value": {
"type": "object",
"description": "The selected date and/or time value in ISO 8601 format. This can be a literal string ('literalString') or a reference to a value in the data model ('path', e.g. '/user/dob').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"enableDate": {
"type": "boolean",
"description": "If true, allows the user to select a date."
},
"enableTime": {
"type": "boolean",
"description": "If true, allows the user to select a time."
}
},
"required": ["value"]
},
"MultipleChoice": {
"type": "object",
"additionalProperties": false,
"properties": {
"selections": {
"type": "object",
                            "description": "The currently selected values for the component. This can be a literal array of strings or a path to an array in the data model ('path', e.g. '/hotel/options').",
"additionalProperties": false,
"properties": {
"literalArray": {
"type": "array",
"items": {
"type": "string"
}
},
"path": {
"type": "string"
}
}
},
"options": {
"type": "array",
"description": "An array of available options for the user to choose from.",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"label": {
"type": "object",
"description": "The text to display for this option. This can be a literal string or a reference to a value in the data model (e.g. '/option/label').",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string"
},
"path": {
"type": "string"
}
}
},
"value": {
"type": "string",
"description": "The value to be associated with this option when selected."
}
},
"required": ["label", "value"]
}
},
"maxAllowedSelections": {
"type": "integer",
"description": "The maximum number of options that the user is allowed to select."
},
"variant": {
"type": "string",
"enum": ["checkbox", "chips"],
"description": "The visual variant for the selection UI."
},
"filterable": {
"type": "boolean",
"description": "Whether options can be filtered by typing."
}
},
"required": ["selections", "options"]
},
"Slider": {
"type": "object",
"additionalProperties": false,
"properties": {
"value": {
"type": "object",
"description": "The current value of the slider. This can be a literal number ('literalNumber') or a reference to a value in the data model ('path', e.g. '/restaurant/cost').",
"additionalProperties": false,
"properties": {
"literalNumber": {
"type": "number"
},
"path": {
"type": "string"
}
}
},
"minValue": {
"type": "number",
"description": "The minimum value of the slider."
},
"maxValue": {
"type": "number",
"description": "The maximum value of the slider."
}
},
"required": ["value"]
}
}
}
},
"required": ["id", "component"]
}
}
},
"required": ["surfaceId", "components"]
},
"dataModelUpdate": {
"type": "object",
"description": "Updates the data model for a surface.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface this data model update applies to."
},
"path": {
"type": "string",
"description": "An optional path to a location within the data model (e.g., '/user/name'). If omitted, or set to '/', the entire data model will be replaced."
},
"contents": {
"type": "array",
"description": "An array of data entries. Each entry must contain a 'key' and exactly one corresponding typed 'value*' property.",
"items": {
"type": "object",
"description": "A single data entry. Exactly one 'value*' property should be provided alongside the key.",
"additionalProperties": false,
"properties": {
"key": {
"type": "string",
"description": "The key for this data entry."
},
"valueString": {
"type": "string"
},
"valueNumber": {
"type": "number"
},
"valueBoolean": {
"type": "boolean"
},
"valueMap": {
"description": "Represents a map as an adjacency list.",
"type": "array",
"items": {
"type": "object",
"description": "One entry in the map. Exactly one 'value*' property should be provided alongside the key.",
"additionalProperties": false,
"properties": {
"key": {
"type": "string"
},
"valueString": {
"type": "string"
},
"valueNumber": {
"type": "number"
},
"valueBoolean": {
"type": "boolean"
}
},
"required": ["key"]
}
}
},
"required": ["key"]
}
}
},
"required": ["contents", "surfaceId"]
},
"deleteSurface": {
"type": "object",
"description": "Signals the client to delete the surface identified by 'surfaceId'.",
"additionalProperties": false,
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be deleted."
}
},
"required": ["surfaceId"]
}
}
}

View File

@@ -1,459 +0,0 @@
{
"components": {
"Text": {
"type": "object",
"additionalProperties": false,
"properties": {
"text": {
"type": "object",
"description": "The text content to display. This can be a literal string or a reference to a value in the data model ('path', e.g., '/doc/title'). While simple Markdown formatting is supported (i.e. without HTML, images, or links), utilizing dedicated UI components is generally preferred for a richer and more structured presentation.",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"usageHint": {
"type": "string",
"description": "A hint for the base text style.",
"enum": ["h1", "h2", "h3", "h4", "h5", "caption", "body"]
}
},
"required": ["text"]
},
"Image": {
"type": "object",
"additionalProperties": false,
"properties": {
"url": {
"type": "object",
"description": "The URL of the image to display.",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"fit": {
"type": "string",
"description": "Specifies how the image should be resized to fit its container.",
"enum": ["contain", "cover", "fill", "none", "scale-down"]
},
"usageHint": {
"type": "string",
"description": "A hint for the image size and style.",
"enum": ["icon", "avatar", "smallFeature", "mediumFeature", "largeFeature", "header"]
}
},
"required": ["url"]
},
"Icon": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": {
"type": "object",
"description": "The name of the icon to display.",
"additionalProperties": false,
"properties": {
"literalString": {
"type": "string",
"enum": [
"accountCircle", "add", "arrowBack", "arrowForward", "attachFile",
"calendarToday", "call", "camera", "check", "close", "delete",
"download", "edit", "event", "error", "favorite", "favoriteOff",
"folder", "help", "home", "info", "locationOn", "lock", "lockOpen",
"mail", "menu", "moreVert", "moreHoriz", "notificationsOff",
"notifications", "payment", "person", "phone", "photo", "print",
"refresh", "search", "send", "settings", "share", "shoppingCart",
"star", "starHalf", "starOff", "upload", "visibility",
"visibilityOff", "warning"
]
},
"path": { "type": "string" }
}
}
},
"required": ["name"]
},
"Video": {
"type": "object",
"additionalProperties": false,
"properties": {
"url": {
"type": "object",
"description": "The URL of the video to display.",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
}
},
"required": ["url"]
},
"AudioPlayer": {
"type": "object",
"additionalProperties": false,
"properties": {
"url": {
"type": "object",
"description": "The URL of the audio to be played.",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"description": {
"type": "object",
"description": "A description of the audio, such as a title or summary.",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
}
},
"required": ["url"]
},
"Row": {
"type": "object",
"additionalProperties": false,
"properties": {
"children": {
"type": "object",
"description": "Defines the children. Use 'explicitList' for a fixed set of children, or 'template' to generate children from a data list.",
"additionalProperties": false,
"properties": {
"explicitList": { "type": "array", "items": { "type": "string" } },
"template": {
"type": "object",
"additionalProperties": false,
"properties": {
"componentId": { "type": "string" },
"dataBinding": { "type": "string" }
},
"required": ["componentId", "dataBinding"]
}
}
},
"distribution": {
"type": "string",
"enum": ["center", "end", "spaceAround", "spaceBetween", "spaceEvenly", "start"]
},
"alignment": {
"type": "string",
"enum": ["start", "center", "end", "stretch"]
}
},
"required": ["children"]
},
"Column": {
"type": "object",
"additionalProperties": false,
"properties": {
"children": {
"type": "object",
"description": "Defines the children. Use 'explicitList' for a fixed set of children, or 'template' to generate children from a data list.",
"additionalProperties": false,
"properties": {
"explicitList": { "type": "array", "items": { "type": "string" } },
"template": {
"type": "object",
"additionalProperties": false,
"properties": {
"componentId": { "type": "string" },
"dataBinding": { "type": "string" }
},
"required": ["componentId", "dataBinding"]
}
}
},
"distribution": {
"type": "string",
"enum": ["start", "center", "end", "spaceBetween", "spaceAround", "spaceEvenly"]
},
"alignment": {
"type": "string",
"enum": ["center", "end", "start", "stretch"]
}
},
"required": ["children"]
},
"List": {
"type": "object",
"additionalProperties": false,
"properties": {
"children": {
"type": "object",
"description": "Defines the children. Use 'explicitList' for a fixed set of children, or 'template' to generate children from a data list.",
"additionalProperties": false,
"properties": {
"explicitList": { "type": "array", "items": { "type": "string" } },
"template": {
"type": "object",
"additionalProperties": false,
"properties": {
"componentId": { "type": "string" },
"dataBinding": { "type": "string" }
},
"required": ["componentId", "dataBinding"]
}
}
},
"direction": {
"type": "string",
"enum": ["vertical", "horizontal"]
},
"alignment": {
"type": "string",
"enum": ["start", "center", "end", "stretch"]
}
},
"required": ["children"]
},
"Card": {
"type": "object",
"additionalProperties": false,
"properties": {
"child": {
"type": "string",
"description": "The ID of the component to be rendered inside the card."
}
},
"required": ["child"]
},
"Tabs": {
"type": "object",
"additionalProperties": false,
"properties": {
"tabItems": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"title": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"child": { "type": "string" }
},
"required": ["title", "child"]
}
}
},
"required": ["tabItems"]
},
"Divider": {
"type": "object",
"additionalProperties": false,
"properties": {
"axis": {
"type": "string",
"enum": ["horizontal", "vertical"]
}
}
},
"Modal": {
"type": "object",
"additionalProperties": false,
"properties": {
"entryPointChild": {
"type": "string",
"description": "The ID of the component that opens the modal when interacted with."
},
"contentChild": {
"type": "string",
"description": "The ID of the component to be displayed inside the modal."
}
},
"required": ["entryPointChild", "contentChild"]
},
"Button": {
"type": "object",
"additionalProperties": false,
"properties": {
"child": {
"type": "string",
"description": "The ID of the component to display in the button."
},
"primary": {
"type": "boolean",
"description": "Indicates if this button should be styled as the primary action."
},
"action": {
"type": "object",
"additionalProperties": false,
"properties": {
"name": { "type": "string" },
"context": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"key": { "type": "string" },
"value": {
"type": "object",
"additionalProperties": false,
"properties": {
"path": { "type": "string" },
"literalString": { "type": "string" },
"literalNumber": { "type": "number" },
"literalBoolean": { "type": "boolean" }
}
}
},
"required": ["key", "value"]
}
}
},
"required": ["name"]
}
},
"required": ["child", "action"]
},
"CheckBox": {
"type": "object",
"additionalProperties": false,
"properties": {
"label": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"value": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalBoolean": { "type": "boolean" },
"path": { "type": "string" }
}
}
},
"required": ["label", "value"]
},
"TextField": {
"type": "object",
"additionalProperties": false,
"properties": {
"label": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"text": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"textFieldType": {
"type": "string",
"enum": ["date", "longText", "number", "shortText", "obscured"]
},
"validationRegexp": { "type": "string" }
},
"required": ["label"]
},
"DateTimeInput": {
"type": "object",
"additionalProperties": false,
"properties": {
"value": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"enableDate": { "type": "boolean" },
"enableTime": { "type": "boolean" }
},
"required": ["value"]
},
"MultipleChoice": {
"type": "object",
"additionalProperties": false,
"properties": {
"selections": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalArray": { "type": "array", "items": { "type": "string" } },
"path": { "type": "string" }
}
},
"options": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"properties": {
"label": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalString": { "type": "string" },
"path": { "type": "string" }
}
},
"value": { "type": "string" }
},
"required": ["label", "value"]
}
},
"maxAllowedSelections": { "type": "integer" },
"variant": {
"type": "string",
"enum": ["checkbox", "chips"]
},
"filterable": { "type": "boolean" }
},
"required": ["selections", "options"]
},
"Slider": {
"type": "object",
"additionalProperties": false,
"properties": {
"value": {
"type": "object",
"additionalProperties": false,
"properties": {
"literalNumber": { "type": "number" },
"path": { "type": "string" }
}
},
"minValue": { "type": "number" },
"maxValue": { "type": "number" }
},
"required": ["value"]
}
},
"styles": {
"font": {
"type": "string",
"description": "The primary font for the UI."
},
"primaryColor": {
"type": "string",
"description": "The primary UI color as a hexadecimal code (e.g., '#00BFFF').",
"pattern": "^#[0-9a-fA-F]{6}$"
}
}
}

View File

@@ -1,97 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://a2ui.org/specification/v0_9/client_capabilities.json",
"title": "A2UI Client Capabilities Schema",
"description": "A schema for the a2uiClientCapabilities object, which is sent from the client to the server as part of the A2A metadata to describe the client's UI rendering capabilities.",
"type": "object",
"properties": {
"v0.9": {
"type": "object",
"description": "The capabilities structure for version 0.9 of the A2UI protocol.",
"properties": {
"supportedCatalogIds": {
"type": "array",
"description": "The URI of each of the component and function catalogs that is supported by the client.",
"items": { "type": "string" }
},
"inlineCatalogs": {
"type": "array",
"description": "An array of inline catalog definitions, which can contain both components and functions. This should only be provided if the agent declares 'acceptsInlineCatalogs: true' in its capabilities.",
"items": { "$ref": "#/$defs/Catalog" }
}
},
"required": ["supportedCatalogIds"]
}
},
"required": ["v0.9"],
"$defs": {
"FunctionDefinition": {
"type": "object",
"description": "Describes a function's interface.",
"properties": {
"name": {
"type": "string",
"description": "The unique name of the function."
},
"description": {
"type": "string",
"description": "A human-readable description of what the function does and how to use it."
},
"parameters": {
"type": "object",
"description": "A JSON Schema describing the expected arguments (args) for this function.",
"$ref": "https://json-schema.org/draft/2020-12/schema"
},
"returnType": {
"type": "string",
"enum": [
"string",
"number",
"boolean",
"array",
"object",
"any",
"void"
],
"description": "The type of value this function returns."
}
},
"required": ["name", "parameters", "returnType"],
"additionalProperties": false
},
"Catalog": {
"type": "object",
"description": "A collection of component and function definitions.",
"properties": {
"catalogId": {
"type": "string",
"description": "Unique identifier for this catalog."
},
"components": {
"type": "object",
"description": "Definitions for UI components supported by this catalog.",
"additionalProperties": {
"$ref": "https://json-schema.org/draft/2020-12/schema"
}
},
"functions": {
"type": "array",
"description": "Definitions for functions supported by this catalog.",
"items": {
"$ref": "#/$defs/FunctionDefinition"
}
},
"theme": {
"title": "A2UI Theme",
"description": "A schema that defines a catalog of A2UI theme properties. Each key is a theme property name (e.g. 'primaryColor'), and each value is the JSON schema for that property.",
"type": "object",
"additionalProperties": {
"$ref": "https://json-schema.org/draft/2020-12/schema"
}
}
},
"required": ["catalogId"],
"additionalProperties": false
}
}
}

View File

@@ -1,22 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://a2ui.org/specification/v0_9/client_data_model.json",
"title": "A2UI Client Data Model Schema",
"description": "Schema for attaching the client data model to A2A message metadata. This object should be placed in the `a2uiClientDataModel` field of the metadata.",
"type": "object",
"properties": {
"version": {
"const": "v0.9"
},
"surfaces": {
"type": "object",
"description": "A map of surface IDs to their current data models.",
"additionalProperties": {
"type": "object",
"description": "The current data model for the surface, as a standard JSON object."
}
}
},
"required": ["version", "surfaces"],
"additionalProperties": false
}

View File

@@ -1,104 +0,0 @@
{
"title": "A2UI (Agent to UI) Client-to-Server Event Schema",
"description": "Describes a JSON payload for a client-to-server event message.",
"type": "object",
"minProperties": 2,
"maxProperties": 2,
"properties": {
"version": {
"const": "v0.9"
},
"action": {
"type": "object",
"description": "Reports a user-initiated action from a component.",
"properties": {
"name": {
"type": "string",
"description": "The name of the action, taken from the component's action.event.name property."
},
"surfaceId": {
"type": "string",
"description": "The id of the surface where the event originated."
},
"sourceComponentId": {
"type": "string",
"description": "The id of the component that triggered the event."
},
"timestamp": {
"type": "string",
"format": "date-time",
"description": "An ISO 8601 timestamp of when the event occurred."
},
"context": {
"type": "object",
"description": "A JSON object containing the key-value pairs from the component's action.event.context, after resolving all data bindings.",
"additionalProperties": true
}
},
"required": [
"name",
"surfaceId",
"sourceComponentId",
"timestamp",
"context"
]
},
"error": {
"description": "Reports a client-side error.",
"oneOf": [
{
"type": "object",
"title": "Validation Failed Error",
"properties": {
"code": {
"const": "VALIDATION_FAILED"
},
"surfaceId": {
"type": "string",
"description": "The id of the surface where the error occurred."
},
"path": {
"type": "string",
"description": "The JSON pointer to the field that failed validation (e.g. '/components/0/text')."
},
"message": {
"type": "string",
"description": "A short one or two sentence description of why validation failed."
}
},
"required": ["code", "path", "message", "surfaceId"],
"additionalProperties": false
},
{
"type": "object",
"title": "Generic Error",
"properties": {
"code": {
"not": {
"const": "VALIDATION_FAILED"
}
},
"message": {
"type": "string",
"description": "A short one or two sentence description of why the error occurred."
},
"surfaceId": {
"type": "string",
"description": "The id of the surface where the error occurred."
}
},
"required": ["code", "surfaceId", "message"],
"additionalProperties": true
}
]
}
},
"oneOf": [
{
"required": ["action", "version"]
},
{
"required": ["error", "version"]
}
]
}

View File

@@ -1,315 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://a2ui.org/specification/v0_9/common_types.json",
"title": "A2UI Common Types",
"description": "Common type definitions used across A2UI schemas.",
"$defs": {
"ComponentId": {
"type": "string",
"description": "The unique identifier for a component, used for both definitions and references within the same surface."
},
"AccessibilityAttributes": {
"type": "object",
"description": "Attributes to enhance accessibility when using assistive technologies like screen readers.",
"properties": {
"label": {
"$ref": "#/$defs/DynamicString",
"description": "A short string, typically 1 to 3 words, used by assistive technologies to convey the purpose or intent of an element. For example, an input field might have an accessible label of 'User ID' or a button might be labeled 'Submit'."
},
"description": {
"$ref": "#/$defs/DynamicString",
"description": "Additional information provided by assistive technologies about an element such as instructions, format requirements, or result of an action. For example, a mute button might have a label of 'Mute' and a description of 'Silences notifications about this conversation'."
}
}
},
"ComponentCommon": {
"type": "object",
"properties": {
"id": {
"$ref": "#/$defs/ComponentId"
},
"accessibility": {
"$ref": "#/$defs/AccessibilityAttributes"
}
},
"required": ["id"]
},
"ChildList": {
"oneOf": [
{
"type": "array",
"items": {
"$ref": "#/$defs/ComponentId"
},
"description": "A static list of child component IDs."
},
{
"type": "object",
"description": "A template for generating a dynamic list of children from a data model list. The `componentId` is the component to use as a template.",
"properties": {
"componentId": {
"$ref": "#/$defs/ComponentId"
},
"path": {
"type": "string",
"description": "The path to the list of component property objects in the data model."
}
},
"required": ["componentId", "path"],
"additionalProperties": false
}
]
},
"DataBinding": {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "A JSON Pointer path to a value in the data model."
}
},
"required": ["path"],
"additionalProperties": false
},
"DynamicValue": {
"description": "A value that can be a literal, a path, or a function call returning any type.",
"oneOf": [
{
"type": "string"
},
{
"type": "number"
},
{
"type": "boolean"
},
{
"type": "array"
},
{
"$ref": "#/$defs/DataBinding"
},
{
"$ref": "#/$defs/FunctionCall"
}
]
},
"DynamicString": {
"description": "Represents a string",
"oneOf": [
{
"type": "string"
},
{
"$ref": "#/$defs/DataBinding"
},
{
"allOf": [
{
"$ref": "#/$defs/FunctionCall"
},
{
"properties": {
"returnType": {
"const": "string"
}
}
}
]
}
]
},
"DynamicNumber": {
"description": "Represents a value that can be either a literal number, a path to a number in the data model, or a function call returning a number.",
"oneOf": [
{
"type": "number"
},
{
"$ref": "#/$defs/DataBinding"
},
{
"allOf": [
{
"$ref": "#/$defs/FunctionCall"
},
{
"properties": {
"returnType": {
"const": "number"
}
}
}
]
}
]
},
"DynamicBoolean": {
"description": "A boolean value that can be a literal, a path, or a function call returning a boolean.",
"oneOf": [
{
"type": "boolean"
},
{
"$ref": "#/$defs/DataBinding"
},
{
"allOf": [
{
"$ref": "#/$defs/FunctionCall"
},
{
"properties": {
"returnType": {
"const": "boolean"
}
}
}
]
}
]
},
"DynamicStringList": {
"description": "Represents a value that can be either a literal array of strings, a path to a string array in the data model, or a function call returning a string array.",
"oneOf": [
{
"type": "array",
"items": {
"type": "string"
}
},
{
"$ref": "#/$defs/DataBinding"
},
{
"allOf": [
{
"$ref": "#/$defs/FunctionCall"
},
{
"properties": {
"returnType": {
"const": "array"
}
}
}
]
}
]
},
"FunctionCall": {
"type": "object",
"description": "Invokes a named function on the client.",
"properties": {
"call": {
"type": "string",
"description": "The name of the function to call."
},
"args": {
"type": "object",
"description": "Arguments passed to the function.",
"additionalProperties": {
"anyOf": [
{
"$ref": "#/$defs/DynamicValue"
},
{
"type": "object",
"description": "A literal object argument (e.g. configuration)."
}
]
}
},
"returnType": {
"type": "string",
"description": "The expected return type of the function call.",
"enum": [
"string",
"number",
"boolean",
"array",
"object",
"any",
"void"
],
"default": "boolean"
}
},
"required": ["call"],
"oneOf": [
{ "$ref": "basic_catalog.json#/$defs/anyFunction" }
]
},
"CheckRule": {
"type": "object",
"description": "A single validation rule applied to an input component.",
"properties": {
"condition": {
"$ref": "#/$defs/DynamicBoolean"
},
"message": {
"type": "string",
"description": "The error message to display if the check fails."
}
},
"required": ["condition", "message"],
"additionalProperties": false
},
"Checkable": {
"description": "Properties for components that support client-side checks.",
"type": "object",
"properties": {
"checks": {
"type": "array",
"description": "A list of checks to perform. These are function calls that must return a boolean indicating validity.",
"items": {
"$ref": "#/$defs/CheckRule"
}
}
}
},
"Action": {
"description": "Defines an interaction handler that can either trigger a server-side event or execute a local client-side function.",
"oneOf": [
{
"type": "object",
"description": "Triggers a server-side event.",
"properties": {
"event": {
"type": "object",
"description": "The event to dispatch to the server.",
"properties": {
"name": {
"type": "string",
"description": "The name of the action to be dispatched to the server."
},
"context": {
"type": "object",
"description": "A JSON object containing the key-value pairs for the action context. Values can be literals or paths. Use literal values unless the value must be dynamically bound to the data model. Do NOT use paths for static IDs.",
"additionalProperties": {
"$ref": "#/$defs/DynamicValue"
}
}
},
"required": ["name"],
"additionalProperties": false
}
},
"required": ["event"],
"additionalProperties": false
},
{
"type": "object",
"description": "Executes a local client-side function.",
"properties": {
"functionCall": {
"$ref": "#/$defs/FunctionCall"
}
},
"required": ["functionCall"],
"additionalProperties": false
}
]
}
}
}

View File

@@ -1,26 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://a2ui.org/specification/v0_9/server_capabilities.json",
"title": "A2UI Server Capabilities Schema",
"description": "A schema for the server capabilities object, which is used by an A2UI server (or Agent) to advertise its supported UI features to clients. This can be embedded in an Agent Card for A2A or used in other transport protocols like MCP.",
"type": "object",
"properties": {
"v0.9": {
"type": "object",
"description": "The server capabilities structure for version 0.9 of the A2UI protocol.",
"properties": {
"supportedCatalogIds": {
"type": "array",
"description": "An array of strings, where each string is an ID identifying a Catalog Definition Schema that the server can generate. This is not necessarily a resolvable URI.",
"items": { "type": "string" }
},
"acceptsInlineCatalogs": {
"type": "boolean",
"description": "A boolean indicating if the server can accept an 'inlineCatalogs' array in the client's a2uiClientCapabilities. If omitted, this defaults to false.",
"default": false
}
}
}
},
"required": ["v0.9"]
}

View File

@@ -1,132 +0,0 @@
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://a2ui.org/specification/v0_9/server_to_client.json",
"title": "A2UI Message Schema",
"description": "Describes a JSON payload for an A2UI (Agent to UI) message, which is used to dynamically construct and update user interfaces.",
"type": "object",
"oneOf": [
{ "$ref": "#/$defs/CreateSurfaceMessage" },
{ "$ref": "#/$defs/UpdateComponentsMessage" },
{ "$ref": "#/$defs/UpdateDataModelMessage" },
{ "$ref": "#/$defs/DeleteSurfaceMessage" }
],
"$defs": {
"CreateSurfaceMessage": {
"type": "object",
"properties": {
"version": {
"const": "v0.9"
},
"createSurface": {
"type": "object",
"description": "Signals the client to create a new surface and begin rendering it. When this message is sent, the client will expect 'updateComponents' and/or 'updateDataModel' messages for the same surfaceId that define the component tree.",
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be rendered."
},
"catalogId": {
"description": "A string that uniquely identifies this catalog. It is recommended to prefix this with an internet domain that you own, to avoid conflicts e.g. mycompany.com:somecatalog'.",
"type": "string"
},
"theme": {
"$ref": "basic_catalog.json#/$defs/theme",
"description": "Theme parameters for the surface (e.g., {'primaryColor': '#FF0000'}). These must validate against the 'theme' schema defined in the catalog."
},
"sendDataModel": {
"type": "boolean",
"description": "If true, the client will send the full data model of this surface in the metadata of every A2A message sent to the server that created the surface. Defaults to false."
}
},
"required": ["surfaceId", "catalogId"],
"additionalProperties": false
}
},
"required": ["createSurface", "version"],
"additionalProperties": false
},
"UpdateComponentsMessage": {
"type": "object",
"properties": {
"version": {
"const": "v0.9"
},
"updateComponents": {
"type": "object",
"description": "Updates a surface with a new set of components. This message can be sent multiple times to update the component tree of an existing surface. One of the components in one of the components lists MUST have an 'id' of 'root' to serve as the root of the component tree. The createSurface message MUST have been previously sent with the 'catalogId' that is in this message.",
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be updated."
},
"components": {
"type": "array",
"description": "A list containing all UI components for the surface.",
"minItems": 1,
"items": {
"$ref": "basic_catalog.json#/$defs/anyComponent"
}
}
},
"required": ["surfaceId", "components"],
"additionalProperties": false
}
},
"required": ["updateComponents", "version"],
"additionalProperties": false
},
"UpdateDataModelMessage": {
"type": "object",
"properties": {
"version": {
"const": "v0.9"
},
"updateDataModel": {
"type": "object",
"description": "Updates the data model for an existing surface. This message can be sent multiple times to update the data model. The createSurface message MUST have been previously sent with the 'catalogId' that is in this message.",
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface this data model update applies to."
},
"path": {
"type": "string",
"description": "An optional path to a location within the data model (e.g., '/user/name'). If omitted, or set to '/', refers to the entire data model."
},
"value": {
"description": "The data to be updated in the data model. If present, the value at 'path' is replaced (or created). If omitted, the key at 'path' is removed.",
"additionalProperties": true
}
},
"required": ["surfaceId"],
"additionalProperties": false
}
},
"required": ["updateDataModel", "version"],
"additionalProperties": false
},
"DeleteSurfaceMessage": {
"type": "object",
"properties": {
"version": {
"const": "v0.9"
},
"deleteSurface": {
"type": "object",
"description": "Signals the client to delete the surface identified by 'surfaceId'. The createSurface message MUST have been previously sent with the 'catalogId' that is in this message.",
"properties": {
"surfaceId": {
"type": "string",
"description": "The unique identifier for the UI surface to be deleted."
}
},
"required": ["surfaceId"],
"additionalProperties": false
}
},
"required": ["deleteSurface", "version"],
"additionalProperties": false
}
}
}

View File

@@ -1,160 +0,0 @@
"""A2UI server extension for the A2A protocol."""
from __future__ import annotations
import logging
from typing import Any
from crewai.a2a.extensions.a2ui.models import A2UIResponse, extract_a2ui_json_objects
from crewai.a2a.extensions.a2ui.v0_9 import (
extract_a2ui_v09_json_objects,
)
from crewai.a2a.extensions.a2ui.validator import (
A2UIValidationError,
validate_a2ui_message,
validate_a2ui_message_v09,
)
from crewai.a2a.extensions.server import ExtensionContext, ServerExtension
logger = logging.getLogger(__name__)
A2UI_MIME_TYPE = "application/json+a2ui"
A2UI_EXTENSION_URI = "https://a2ui.org/a2a-extension/a2ui/v0.8"
A2UI_STANDARD_CATALOG_ID = (
"https://a2ui.org/specification/v0_8/standard_catalog_definition.json"
)
A2UI_V09_EXTENSION_URI = "https://a2ui.org/a2a-extension/a2ui/v0.9"
A2UI_V09_BASIC_CATALOG_ID = "https://a2ui.org/specification/v0_9/basic_catalog.json"
class A2UIServerExtension(ServerExtension):
"""A2A server extension that enables A2UI declarative UI generation.
Supports both v0.8 and v0.9 of the A2UI protocol via the ``version``
parameter. When activated by a client, this extension:
* Negotiates catalog preferences during ``on_request``.
* Wraps A2UI messages in the agent response as A2A DataParts with
``application/json+a2ui`` MIME type during ``on_response``.
Example::
A2AServerConfig
server_extensions=[A2UIServerExtension],
default_output_modes=["text/plain", "application/json+a2ui"],
"""
uri: str = A2UI_EXTENSION_URI
required: bool = False
description: str = "A2UI declarative UI generation"
def __init__(
self,
catalog_ids: list[str] | None = None,
accept_inline_catalogs: bool = False,
version: str = "v0.8",
) -> None:
"""Initialize the A2UI server extension.
Args:
catalog_ids: Catalog identifiers this server supports.
accept_inline_catalogs: Whether inline catalog definitions are accepted.
version: Protocol version, ``"v0.8"`` or ``"v0.9"``.
"""
self._catalog_ids = catalog_ids or []
self._accept_inline_catalogs = accept_inline_catalogs
self._version = version
if version == "v0.9":
self.uri = A2UI_V09_EXTENSION_URI
@property
def params(self) -> dict[str, Any]:
"""Extension parameters advertised in the AgentCard."""
result: dict[str, Any] = {}
if self._catalog_ids:
result["supportedCatalogIds"] = self._catalog_ids
result["acceptsInlineCatalogs"] = self._accept_inline_catalogs
return result
async def on_request(self, context: ExtensionContext) -> None:
"""Extract A2UI catalog preferences from the client request.
Stores the negotiated catalog in ``context.state`` under
``"a2ui_catalog_id"`` for downstream use.
"""
if not self.is_active(context):
return
catalog_id = context.get_extension_metadata(self.uri, "catalogId")
if isinstance(catalog_id, str):
context.state["a2ui_catalog_id"] = catalog_id
elif self._catalog_ids:
context.state["a2ui_catalog_id"] = self._catalog_ids[0]
context.state["a2ui_active"] = True
async def on_response(self, context: ExtensionContext, result: Any) -> Any:
"""Wrap A2UI messages in the result as A2A DataParts.
Scans the result for A2UI JSON payloads and converts them into
DataParts with ``application/json+a2ui`` MIME type and A2UI metadata.
Dispatches to the correct extractor and validator based on version.
"""
if not context.state.get("a2ui_active"):
return result
if not isinstance(result, str):
return result
if self._version == "v0.9":
a2ui_messages = extract_a2ui_v09_json_objects(result)
else:
a2ui_messages = extract_a2ui_json_objects(result)
if not a2ui_messages:
return result
build_fn = _build_data_part_v09 if self._version == "v0.9" else _build_data_part
data_parts = [
part
for part in (build_fn(msg_data) for msg_data in a2ui_messages)
if part is not None
]
if not data_parts:
return result
return A2UIResponse(text=result, a2ui_parts=data_parts)
def _build_data_part(msg_data: dict[str, Any]) -> dict[str, Any] | None:
"""Validate a v0.8 A2UI message and wrap it as a DataPart dict."""
try:
validated = validate_a2ui_message(msg_data)
except A2UIValidationError:
logger.warning("Skipping invalid A2UI message in response", exc_info=True)
return None
return {
"kind": "data",
"data": validated.model_dump(by_alias=True, exclude_none=True),
"metadata": {
"mimeType": A2UI_MIME_TYPE,
},
}
def _build_data_part_v09(msg_data: dict[str, Any]) -> dict[str, Any] | None:
    """Validate a v0.9 A2UI message and wrap it as a DataPart dict."""
    try:
        message = validate_a2ui_message_v09(msg_data)
    except A2UIValidationError:
        # Invalid payloads are dropped rather than failing the response.
        logger.warning("Skipping invalid A2UI v0.9 message in response", exc_info=True)
        return None
    payload = message.model_dump(by_alias=True, exclude_none=True)
    return {
        "kind": "data",
        "data": payload,
        "metadata": {"mimeType": A2UI_MIME_TYPE},
    }

View File

@@ -1,831 +0,0 @@
"""Pydantic models for A2UI v0.9 protocol messages and types.
This module provides v0.9 counterparts to the v0.8 models in ``models.py``.
Key differences from v0.8:
* ``beginRendering`` → ``createSurface`` — adds ``theme``, ``sendDataModel``,
requires ``catalogId``.
* ``surfaceUpdate`` → ``updateComponents`` — component structure is flat:
``component`` is a type-name string, properties live at the top level.
* ``dataModelUpdate`` → ``updateDataModel`` — ``contents`` adjacency list
replaced by a single ``value`` of any JSON type; ``path`` uses JSON Pointers.
* All messages carry a ``version: "v0.9"`` discriminator.
* Data binding uses plain JSON values, ``DataBinding`` objects, or
``FunctionCall`` objects instead of ``literalString`` / ``path`` wrappers.
* ``MultipleChoice`` is replaced by ``ChoicePicker``.
* ``Styles`` is replaced by ``Theme`` — adds ``iconUrl``, ``agentDisplayName``.
* Client-to-server ``userAction`` is renamed to ``action``; ``error`` gains
structured ``code`` / ``path`` fields.
"""
from __future__ import annotations
import json
from typing import Any, Literal, get_args
from pydantic import BaseModel, ConfigDict, Field, model_validator
# Component type tags defined by the v0.9 basic catalog; each component
# model below pins its ``component`` field to one of these literals.
ComponentName = Literal[
    "Text",
    "Image",
    "Icon",
    "Video",
    "AudioPlayer",
    "Row",
    "Column",
    "List",
    "Card",
    "Tabs",
    "Modal",
    "Divider",
    "Button",
    "TextField",
    "CheckBox",
    "ChoicePicker",
    "Slider",
    "DateTimeInput",
]
# Runtime set view of ComponentName for membership checks.
BASIC_CATALOG_COMPONENTS: frozenset[ComponentName] = frozenset(get_args(ComponentName))
# Client-side function names defined by the v0.9 basic catalog.
FunctionName = Literal[
    "required",
    "regex",
    "length",
    "numeric",
    "email",
    "formatString",
    "formatNumber",
    "formatCurrency",
    "formatDate",
    "pluralize",
    "openUrl",
    "and",
    "or",
    "not",
]
# Runtime set view of FunctionName for membership checks.
BASIC_CATALOG_FUNCTIONS: frozenset[FunctionName] = frozenset(get_args(FunctionName))
# Icon identifiers accepted by the v0.9 ``Icon`` component.
IconNameV09 = Literal[
    "accountCircle",
    "add",
    "arrowBack",
    "arrowForward",
    "attachFile",
    "calendarToday",
    "call",
    "camera",
    "check",
    "close",
    "delete",
    "download",
    "edit",
    "event",
    "error",
    "fastForward",
    "favorite",
    "favoriteOff",
    "folder",
    "help",
    "home",
    "info",
    "locationOn",
    "lock",
    "lockOpen",
    "mail",
    "menu",
    "moreVert",
    "moreHoriz",
    "notificationsOff",
    "notifications",
    "pause",
    "payment",
    "person",
    "phone",
    "photo",
    "play",
    "print",
    "refresh",
    "rewind",
    "search",
    "send",
    "settings",
    "share",
    "shoppingCart",
    "skipNext",
    "skipPrevious",
    "star",
    "starHalf",
    "starOff",
    "stop",
    "upload",
    "visibility",
    "visibilityOff",
    "volumeDown",
    "volumeMute",
    "volumeOff",
    "volumeUp",
    "warning",
]
# Runtime set view of IconNameV09 for membership checks.
V09_ICON_NAMES: frozenset[IconNameV09] = frozenset(get_args(IconNameV09))
class DataBinding(BaseModel):
    """JSON Pointer path reference into the surface data model."""
    path: str = Field(description="A JSON Pointer path to a value in the data model.")
    # extra="forbid" mirrors the schema's additionalProperties: false.
    model_config = ConfigDict(extra="forbid")
class FunctionCall(BaseModel):
    """Client-side function invocation (v0.9 ``FunctionCall``)."""
    call: str = Field(description="The name of the function to call.")
    # DynamicValue is defined after this class; the forward reference is
    # resolved lazily via `from __future__ import annotations`.
    args: dict[str, DynamicValue] | None = Field(
        default=None, description="Arguments passed to the function."
    )
    # Serialized under the camelCase alias "returnType".
    return_type: (
        Literal["string", "number", "boolean", "array", "object", "any", "void"] | None
    ) = Field(
        default=None,
        alias="returnType",
        description="Expected return type of the function call.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
# Dynamic value aliases: each accepts a plain JSON literal of the matching
# type, a DataBinding path into the data model, or a FunctionCall.
DynamicValue = str | float | int | bool | list[Any] | DataBinding | FunctionCall
DynamicString = str | DataBinding | FunctionCall
DynamicNumber = float | int | DataBinding | FunctionCall
DynamicBoolean = bool | DataBinding | FunctionCall
DynamicStringList = list[str] | DataBinding | FunctionCall
class CheckRule(BaseModel):
    """A single client-side validation rule for an input component."""
    condition: DynamicBoolean = Field(
        description="Condition that must evaluate to true for the check to pass."
    )
    message: str = Field(description="Error message displayed if the check fails.")
    model_config = ConfigDict(extra="forbid")
class AccessibilityAttributes(BaseModel):
    """Accessibility attributes surfaced to assistive technologies."""
    label: DynamicString | None = Field(
        default=None, description="Short label for screen readers."
    )
    description: DynamicString | None = Field(
        default=None, description="Extended description for screen readers."
    )
    model_config = ConfigDict(extra="forbid")
class ChildTemplate(BaseModel):
    """Template for generating dynamic children from a data model list."""
    # Serialized under the camelCase alias "componentId".
    component_id: str = Field(
        alias="componentId", description="Component to repeat per list item."
    )
    path: str = Field(description="Data model path to the list of items.")
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
# Children are either a static list of component ids or a ChildTemplate.
ChildListV09 = list[str] | ChildTemplate
class EventAction(BaseModel):
    """Server-side event triggered by a component interaction."""
    name: str = Field(description="Action name dispatched to the server.")
    context: dict[str, DynamicValue] | None = Field(
        default=None, description="Key-value pairs sent with the event."
    )
    model_config = ConfigDict(extra="forbid")
class ActionV09(BaseModel):
    """Interaction handler: server event or local function call.
    Exactly one of ``event`` or ``function_call`` must be set; the
    ``_check_exactly_one`` validator enforces this after construction.
    """
    event: EventAction | None = Field(
        default=None, description="Triggers a server-side event."
    )
    # Serialized under the camelCase alias "functionCall".
    function_call: FunctionCall | None = Field(
        default=None,
        alias="functionCall",
        description="Executes a local client-side function.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
    @model_validator(mode="after")
    def _check_exactly_one(self) -> ActionV09:
        """Enforce exactly one of event or functionCall."""
        # Counts non-None fields; 0 or 2 set is a validation error.
        count = sum(f is not None for f in (self.event, self.function_call))
        if count != 1:
            raise ValueError(
                f"Exactly one of event or functionCall must be set, got {count}"
            )
        return self
class TextV09(BaseModel):
    """Displays text content (v0.9 ``Text`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["Text"] = "Text"
    text: DynamicString = Field(description="Text content to display.")
    variant: Literal["h1", "h2", "h3", "h4", "h5", "caption", "body"] | None = Field(
        default=None, description="Semantic text style hint."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class ImageV09(BaseModel):
    """Displays an image (v0.9 ``Image`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["Image"] = "Image"
    url: DynamicString = Field(description="Image source URL.")
    description: DynamicString | None = Field(
        default=None, description="Accessibility text."
    )
    fit: Literal["contain", "cover", "fill", "none", "scaleDown"] | None = Field(
        default=None, description="Object-fit behavior."
    )
    variant: (
        Literal[
            "icon", "avatar", "smallFeature", "mediumFeature", "largeFeature", "header"
        ]
        | None
    ) = Field(default=None, description="Image size hint.")
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class IconV09(BaseModel):
    """Displays a named icon (v0.9 ``Icon`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["Icon"] = "Icon"
    name: IconNameV09 | DataBinding = Field(description="Icon name or data binding.")
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class VideoV09(BaseModel):
    """Displays a video player (v0.9 ``Video`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["Video"] = "Video"
    url: DynamicString = Field(description="Video source URL.")
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class AudioPlayerV09(BaseModel):
    """Displays an audio player (v0.9 ``AudioPlayer`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["AudioPlayer"] = "AudioPlayer"
    url: DynamicString = Field(description="Audio source URL.")
    description: DynamicString | None = Field(
        default=None, description="Audio content description."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class RowV09(BaseModel):
    """Horizontal layout container (v0.9 ``Row`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["Row"] = "Row"
    children: ChildListV09 = Field(description="Child components.")
    justify: (
        Literal[
            "center",
            "end",
            "spaceAround",
            "spaceBetween",
            "spaceEvenly",
            "start",
            "stretch",
        ]
        | None
    ) = Field(default=None, description="Main-axis distribution.")
    align: Literal["start", "center", "end", "stretch"] | None = Field(
        default=None, description="Cross-axis alignment."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class ColumnV09(BaseModel):
    """Vertical layout container (v0.9 ``Column`` component)."""
    id: str = Field(description="Unique component identifier.")
    # Literal type tag discriminating this component in v0.9 payloads.
    component: Literal["Column"] = "Column"
    children: ChildListV09 = Field(description="Child components.")
    justify: (
        Literal[
            "start",
            "center",
            "end",
            "spaceBetween",
            "spaceAround",
            "spaceEvenly",
            "stretch",
        ]
        | None
    ) = Field(default=None, description="Main-axis distribution.")
    align: Literal["center", "end", "start", "stretch"] | None = Field(
        default=None, description="Cross-axis alignment."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class ListV09(BaseModel):
    """Scrollable list container.

    ``direction`` selects the scroll axis; ``align`` positions children
    on the cross axis.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["List"] = "List"
    children: ChildListV09 = Field(description="Child components.")
    direction: Literal["vertical", "horizontal"] | None = Field(
        default=None, description="Scroll direction."
    )
    align: Literal["start", "center", "end", "stretch"] | None = Field(
        default=None, description="Cross-axis alignment."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class CardV09(BaseModel):
    """Card container wrapping a single child.

    The wrapped component is referenced by ID via ``child`` rather than
    nested inline.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["Card"] = "Card"
    child: str = Field(description="ID of the child component.")
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class TabItemV09(BaseModel):
    """A single tab definition.

    Used only inside :class:`TabsV09`; ``child`` references the tab's
    content component by ID.
    """
    title: DynamicString = Field(description="Tab title.")
    child: str = Field(description="ID of the tab content component.")
    model_config = ConfigDict(extra="forbid")
class TabsV09(BaseModel):
    """Tabbed navigation container.

    Requires at least one :class:`TabItemV09` (``min_length=1``).
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["Tabs"] = "Tabs"
    tabs: list[TabItemV09] = Field(min_length=1, description="Tab definitions.")
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class ModalV09(BaseModel):
    """Modal dialog with a trigger and content.

    Both ``trigger`` and ``content`` reference other components by ID.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["Modal"] = "Modal"
    trigger: str = Field(description="ID of the component that opens the modal.")
    content: str = Field(description="ID of the component inside the modal.")
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class DividerV09(BaseModel):
    """Visual divider line.

    ``axis`` selects orientation; unset leaves the choice to the client.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["Divider"] = "Divider"
    axis: Literal["horizontal", "vertical"] | None = Field(
        default=None, description="Divider orientation."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class ButtonV09(BaseModel):
    """Interactive button.

    The label is a separate component referenced by ``child``; ``action``
    is dispatched on click and optional ``checks`` carry validation rules.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["Button"] = "Button"
    child: str = Field(description="ID of the button label component.")
    action: ActionV09 = Field(description="Action dispatched on click.")
    variant: Literal["default", "primary", "borderless"] | None = Field(
        default=None, description="Button style variant."
    )
    checks: list[CheckRule] | None = Field(
        default=None, description="Validation rules."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class TextFieldV09(BaseModel):
    """Text input field.

    ``populate_by_name=True`` lets the aliased ``validationRegexp`` field
    also be populated via its snake_case Python name.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["TextField"] = "TextField"
    label: DynamicString = Field(description="Input label.")
    value: DynamicString | None = Field(default=None, description="Current text value.")
    variant: Literal["longText", "number", "shortText", "obscured"] | None = Field(
        default=None, description="Input type variant."
    )
    validation_regexp: str | None = Field(
        default=None,
        alias="validationRegexp",
        description="Regex for client-side validation.",
    )
    checks: list[CheckRule] | None = Field(
        default=None, description="Validation rules."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class CheckBoxV09(BaseModel):
    """Checkbox input.

    ``value`` holds the checked state; optional ``checks`` carry
    validation rules.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["CheckBox"] = "CheckBox"
    label: DynamicString = Field(description="Checkbox label.")
    value: DynamicBoolean = Field(description="Checked state.")
    checks: list[CheckRule] | None = Field(
        default=None, description="Validation rules."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class ChoicePickerOption(BaseModel):
    """A single option in a ChoicePicker.

    ``label`` is what the user sees; ``value`` is the stable identifier
    reported back when the option is selected.
    """
    label: DynamicString = Field(description="Display label.")
    value: str = Field(description="Value when selected.")
    model_config = ConfigDict(extra="forbid")
class ChoicePickerV09(BaseModel):
    """Selection component replacing v0.8 MultipleChoice.

    ``variant`` controls whether multiple options may be selected at once;
    ``value`` always carries a list of selected option values.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["ChoicePicker"] = "ChoicePicker"
    options: list[ChoicePickerOption] = Field(description="Available choices.")
    value: DynamicStringList = Field(description="Currently selected values.")
    label: DynamicString | None = Field(default=None, description="Group label.")
    variant: Literal["multipleSelection", "mutuallyExclusive"] | None = Field(
        default=None, description="Selection behavior."
    )
    display_style: Literal["checkbox", "chips"] | None = Field(
        default=None, alias="displayStyle", description="Visual display style."
    )
    filterable: bool | None = Field(
        default=None, description="Whether options can be filtered."
    )
    checks: list[CheckRule] | None = Field(
        default=None, description="Validation rules."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class SliderV09(BaseModel):
    """Numeric slider input.

    ``max`` is required; ``min`` is optional (client default applies when
    unset).
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["Slider"] = "Slider"
    value: DynamicNumber = Field(description="Current slider value.")
    max: float = Field(description="Maximum slider value.")
    min: float | None = Field(default=None, description="Minimum slider value.")
    label: DynamicString | None = Field(default=None, description="Slider label.")
    checks: list[CheckRule] | None = Field(
        default=None, description="Validation rules."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(extra="forbid")
class DateTimeInputV09(BaseModel):
    """Date and/or time picker.

    ``value`` (and the optional ``min``/``max`` bounds) are ISO 8601
    strings; ``enableDate`` / ``enableTime`` toggle the picker modes.
    """
    id: str = Field(description="Unique component identifier.")
    component: Literal["DateTimeInput"] = "DateTimeInput"
    value: DynamicString = Field(description="ISO 8601 date/time value.")
    enable_date: bool | None = Field(
        default=None, alias="enableDate", description="Enable date selection."
    )
    enable_time: bool | None = Field(
        default=None, alias="enableTime", description="Enable time selection."
    )
    min: DynamicString | None = Field(
        default=None, description="Minimum allowed date/time."
    )
    max: DynamicString | None = Field(
        default=None, description="Maximum allowed date/time."
    )
    label: DynamicString | None = Field(default=None, description="Input label.")
    checks: list[CheckRule] | None = Field(
        default=None, description="Validation rules."
    )
    weight: float | None = Field(default=None, description="Flex weight.")
    accessibility: AccessibilityAttributes | None = None
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class Theme(BaseModel):
    """Surface theme configuration for v0.9.

    Replaces v0.8 ``Styles``. Adds ``iconUrl`` and ``agentDisplayName``
    for agent attribution; drops ``font``.

    ``extra="allow"`` keeps unknown theme keys instead of rejecting them,
    so newer theme fields pass through older validators.
    """
    primary_color: str | None = Field(
        default=None,
        alias="primaryColor",
        pattern=r"^#[0-9a-fA-F]{6}$",
        description="Primary brand color as a hex string.",
    )
    icon_url: str | None = Field(
        default=None,
        alias="iconUrl",
        description="URL for an image identifying the agent or tool.",
    )
    agent_display_name: str | None = Field(
        default=None,
        alias="agentDisplayName",
        description="Text label identifying the agent or tool.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="allow")
class CreateSurface(BaseModel):
    """Signals the client to create a new surface and begin rendering.

    Replaces v0.8 ``BeginRendering``. ``catalogId`` is now required and
    ``theme`` / ``sendDataModel`` are new.
    """
    surface_id: str = Field(alias="surfaceId", description="Unique surface identifier.")
    catalog_id: str = Field(
        alias="catalogId", description="Catalog identifier for this surface."
    )
    theme: Theme | None = Field(default=None, description="Theme parameters.")
    send_data_model: bool | None = Field(
        default=None,
        alias="sendDataModel",
        description="If true, client sends data model in action metadata.",
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class UpdateComponents(BaseModel):
    """Updates a surface with a new set of components.

    Replaces v0.8 ``SurfaceUpdate``. Components use a flat structure where
    ``component`` is a type-name string and properties sit at the top level.
    Component dicts are kept untyped here; per-type validation is opt-in
    via the validation helpers.
    """
    surface_id: str = Field(alias="surfaceId", description="Target surface identifier.")
    components: list[dict[str, Any]] = Field(
        min_length=1, description="Components to render on the surface."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class UpdateDataModel(BaseModel):
    """Updates the data model for a surface.

    Replaces v0.8 ``DataModelUpdate``. The ``contents`` adjacency list is
    replaced by a single ``value`` of any JSON type. ``path`` uses JSON
    Pointer syntax — e.g. ``/user/name``.
    """
    surface_id: str = Field(alias="surfaceId", description="Target surface identifier.")
    path: str | None = Field(
        default=None, description="JSON Pointer path for the update."
    )
    value: Any = Field(
        default=None, description="Value to set. Omit to delete the key."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class DeleteSurfaceV09(BaseModel):
    """Signals the client to delete a surface.

    Carries only the target surface ID.
    """
    surface_id: str = Field(alias="surfaceId", description="Surface to delete.")
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
class A2UIMessageV09(BaseModel):
    """Union wrapper for v0.9 server-to-client message types.

    Exactly one message field must be set alongside the ``version`` field;
    the constraint is enforced by a post-validation model validator.
    """
    version: Literal["v0.9"] = "v0.9"
    create_surface: CreateSurface | None = Field(
        default=None, alias="createSurface", description="Create a new surface."
    )
    update_components: UpdateComponents | None = Field(
        default=None,
        alias="updateComponents",
        description="Update components on a surface.",
    )
    update_data_model: UpdateDataModel | None = Field(
        default=None,
        alias="updateDataModel",
        description="Update the surface data model.",
    )
    delete_surface: DeleteSurfaceV09 | None = Field(
        default=None, alias="deleteSurface", description="Delete a surface."
    )
    model_config = ConfigDict(populate_by_name=True, extra="forbid")
    @model_validator(mode="after")
    def _check_exactly_one(self) -> A2UIMessageV09:
        """Enforce the spec's exactly-one-of constraint."""
        fields = [
            self.create_surface,
            self.update_components,
            self.update_data_model,
            self.delete_surface,
        ]
        # Summing booleans counts how many of the four variants are populated.
        count = sum(f is not None for f in fields)
        if count != 1:
            raise ValueError(
                f"Exactly one A2UI v0.9 message type must be set, got {count}"
            )
        return self
class ActionEvent(BaseModel):
    """User-initiated action from a component.

    Replaces v0.8 ``UserAction``. The event field is renamed from
    ``userAction`` to ``action``.
    """
    name: str = Field(description="Action name.")
    surface_id: str = Field(alias="surfaceId", description="Source surface identifier.")
    source_component_id: str = Field(
        alias="sourceComponentId",
        description="Component that triggered the action.",
    )
    timestamp: str = Field(description="ISO 8601 timestamp of the action.")
    context: dict[str, Any] = Field(description="Resolved action context payload.")
    model_config = ConfigDict(populate_by_name=True)
class ClientErrorV09(BaseModel):
    """Structured client-side error report.

    Replaces v0.8's flexible ``ClientError`` with required ``code``,
    ``surfaceId``, and ``message`` fields. ``extra="allow"`` keeps any
    additional diagnostic keys the client attaches.
    """
    code: str = Field(description="Error code (e.g. VALIDATION_FAILED).")
    surface_id: str = Field(
        alias="surfaceId", description="Surface where the error occurred."
    )
    message: str = Field(description="Human-readable error description.")
    path: str | None = Field(
        default=None, description="JSON Pointer to the failing field."
    )
    model_config = ConfigDict(populate_by_name=True, extra="allow")
class A2UIEventV09(BaseModel):
    """Union wrapper for v0.9 client-to-server events.

    Exactly one of ``action`` / ``error`` must be set; enforced by a
    post-validation model validator.
    """
    version: Literal["v0.9"] = "v0.9"
    action: ActionEvent | None = Field(
        default=None, description="User-initiated action event."
    )
    error: ClientErrorV09 | None = Field(
        default=None, description="Client-side error report."
    )
    model_config = ConfigDict(populate_by_name=True)
    @model_validator(mode="after")
    def _check_exactly_one(self) -> A2UIEventV09:
        """Enforce the spec's exactly-one-of constraint."""
        fields = [self.action, self.error]
        count = sum(f is not None for f in fields)
        if count != 1:
            raise ValueError(
                f"Exactly one A2UI v0.9 event type must be set, got {count}"
            )
        return self
class ClientDataModel(BaseModel):
    """Client data model payload for A2A message metadata.

    When ``sendDataModel`` is ``true`` on ``createSurface``, the client
    attaches this object to every outbound A2A message as
    ``a2uiClientDataModel`` in the metadata.
    """
    version: Literal["v0.9"] = "v0.9"
    surfaces: dict[str, dict[str, Any]] = Field(
        description="Map of surface IDs to their current data models."
    )
    model_config = ConfigDict(extra="forbid")
# Top-level keys that identify a dict as an A2UI v0.9 message.
_V09_KEYS = {"createSurface", "updateComponents", "updateDataModel", "deleteSurface"}


def extract_a2ui_v09_json_objects(text: str) -> list[dict[str, Any]]:
    """Extract JSON objects containing A2UI v0.9 keys from text.

    Scans *text* for ``{`` characters and tries to decode a JSON value at
    each candidate position with ``json.JSONDecoder.raw_decode``, which
    correctly handles braces inside string literals.

    Args:
        text: Arbitrary text that may embed JSON objects.

    Returns:
        Each successfully decoded top-level dict containing at least one
        v0.9 message key, in order of appearance.
    """
    decoder = json.JSONDecoder()
    found: list[dict[str, Any]] = []
    pos = 0
    while pos < len(text):
        brace = text.find("{", pos)
        if brace < 0:
            break
        try:
            candidate, after = decoder.raw_decode(text, brace)
        except json.JSONDecodeError:
            # Not valid JSON here; resume scanning just past this brace.
            pos = brace + 1
            continue
        if isinstance(candidate, dict) and not _V09_KEYS.isdisjoint(candidate):
            found.append(candidate)
        # Skip the entire decoded value, matched or not.
        pos = after
    return found

View File

@@ -1,285 +0,0 @@
"""Validate A2UI message dicts via Pydantic models."""
from __future__ import annotations
from typing import Any
from pydantic import BaseModel, ValidationError
from crewai.a2a.extensions.a2ui.catalog import (
AudioPlayer,
Button,
Card,
CheckBox,
Column,
DateTimeInput,
Divider,
Icon,
Image,
List,
Modal,
MultipleChoice,
Row,
Slider,
Tabs,
Text,
TextField,
Video,
)
from crewai.a2a.extensions.a2ui.models import A2UIEvent, A2UIMessage
from crewai.a2a.extensions.a2ui.v0_9 import (
A2UIEventV09,
A2UIMessageV09,
AudioPlayerV09,
ButtonV09,
CardV09,
CheckBoxV09,
ChoicePickerV09,
ColumnV09,
DateTimeInputV09,
DividerV09,
IconV09,
ImageV09,
ListV09,
ModalV09,
RowV09,
SliderV09,
TabsV09,
TextFieldV09,
TextV09,
VideoV09,
)
# Maps standard-catalog component type names to their pydantic models.
# Used by validate_catalog_components; types absent from this mapping
# are skipped without error.
_STANDARD_CATALOG_MODELS: dict[str, type[BaseModel]] = {
    "AudioPlayer": AudioPlayer,
    "Button": Button,
    "Card": Card,
    "CheckBox": CheckBox,
    "Column": Column,
    "DateTimeInput": DateTimeInput,
    "Divider": Divider,
    "Icon": Icon,
    "Image": Image,
    "List": List,
    "Modal": Modal,
    "MultipleChoice": MultipleChoice,
    "Row": Row,
    "Slider": Slider,
    "Tabs": Tabs,
    "Text": Text,
    "TextField": TextField,
    "Video": Video,
}
class A2UIValidationError(Exception):
    """Raised when an A2UI message fails validation.

    Attributes:
        errors: Structured validation error entries (pydantic-style dicts),
            empty when no detail was supplied.
    """

    def __init__(self, message: str, errors: list[Any] | None = None) -> None:
        """Store the human-readable message and optional error details.

        Args:
            message: Summary of the validation failure.
            errors: Optional structured error entries.
        """
        super().__init__(message)
        # Keep the caller's list object when provided; fall back to a fresh one.
        self.errors = errors if errors else []
def validate_a2ui_message(
    data: dict[str, Any],
    *,
    validate_catalog: bool = False,
) -> A2UIMessage:
    """Parse and validate an A2UI server-to-client message.

    Args:
        data: Raw JSON-decoded message dict.
        validate_catalog: If True, also validate component properties
            against the standard catalog.

    Returns:
        Validated ``A2UIMessage`` instance.

    Raises:
        A2UIValidationError: If the data does not conform to the A2UI schema.
    """
    try:
        message = A2UIMessage.model_validate(data)
    except ValidationError as exc:
        detail = f"Invalid A2UI message: {exc.error_count()} validation error(s)"
        raise A2UIValidationError(detail, errors=exc.errors()) from exc
    if validate_catalog:
        validate_catalog_components(message)
    return message
def validate_a2ui_event(data: dict[str, Any]) -> A2UIEvent:
    """Parse and validate an A2UI client-to-server event.

    Args:
        data: Raw JSON-decoded event dict.

    Returns:
        Validated ``A2UIEvent`` instance.

    Raises:
        A2UIValidationError: If the data does not conform to the A2UI event schema.
    """
    try:
        event = A2UIEvent.model_validate(data)
    except ValidationError as exc:
        detail = f"Invalid A2UI event: {exc.error_count()} validation error(s)"
        raise A2UIValidationError(detail, errors=exc.errors()) from exc
    return event
def validate_a2ui_message_v09(data: dict[str, Any]) -> A2UIMessageV09:
    """Parse and validate an A2UI v0.9 server-to-client message.

    Args:
        data: Raw JSON-decoded message dict.

    Returns:
        Validated ``A2UIMessageV09`` instance.

    Raises:
        A2UIValidationError: If the data does not conform to the v0.9 schema.
    """
    try:
        message = A2UIMessageV09.model_validate(data)
    except ValidationError as exc:
        detail = f"Invalid A2UI v0.9 message: {exc.error_count()} validation error(s)"
        raise A2UIValidationError(detail, errors=exc.errors()) from exc
    return message
def validate_a2ui_event_v09(data: dict[str, Any]) -> A2UIEventV09:
    """Parse and validate an A2UI v0.9 client-to-server event.

    Args:
        data: Raw JSON-decoded event dict.

    Returns:
        Validated ``A2UIEventV09`` instance.

    Raises:
        A2UIValidationError: If the data does not conform to the v0.9 schema.
    """
    try:
        event = A2UIEventV09.model_validate(data)
    except ValidationError as exc:
        detail = f"Invalid A2UI v0.9 event: {exc.error_count()} validation error(s)"
        raise A2UIValidationError(detail, errors=exc.errors()) from exc
    return event
def validate_catalog_components(message: A2UIMessage) -> None:
    """Validate component properties in a surfaceUpdate against the standard catalog.

    Only applies to surfaceUpdate messages. Components whose type is not
    in the standard catalog are skipped without error.

    Args:
        message: A validated A2UIMessage.

    Raises:
        A2UIValidationError: If any component fails catalog validation.
    """
    update = message.surface_update
    if update is None:
        # Catalog validation only applies to surfaceUpdate payloads.
        return
    collected: list[Any] = []
    for entry in update.components:
        for type_name, props in entry.component.items():
            schema = _STANDARD_CATALOG_MODELS.get(type_name)
            if schema is None:
                continue
            try:
                schema.model_validate(props)
            except ValidationError as exc:
                # Tag each pydantic error with the offending component.
                for err in exc.errors():
                    collected.append(
                        {
                            "component_id": entry.id,
                            "component_type": type_name,
                            **err,
                        }
                    )
    if collected:
        raise A2UIValidationError(
            f"Catalog validation failed: {len(collected)} error(s)",
            errors=collected,
        )
# Maps v0.9 basic-catalog component type names to their pydantic models.
# Used by validate_catalog_components_v09; types absent from this mapping
# are skipped without error.
_V09_BASIC_CATALOG_MODELS: dict[str, type[BaseModel]] = {
    "AudioPlayer": AudioPlayerV09,
    "Button": ButtonV09,
    "Card": CardV09,
    "CheckBox": CheckBoxV09,
    "ChoicePicker": ChoicePickerV09,
    "Column": ColumnV09,
    "DateTimeInput": DateTimeInputV09,
    "Divider": DividerV09,
    "Icon": IconV09,
    "Image": ImageV09,
    "List": ListV09,
    "Modal": ModalV09,
    "Row": RowV09,
    "Slider": SliderV09,
    "Tabs": TabsV09,
    "Text": TextV09,
    "TextField": TextFieldV09,
    "Video": VideoV09,
}
def validate_catalog_components_v09(message: A2UIMessageV09) -> None:
    """Validate component properties in an updateComponents against the basic catalog.

    v0.9 components use a flat structure where ``component`` is a type-name
    string and properties sit at the top level of the component dict.

    Only applies to updateComponents messages. Components whose type is not
    in the basic catalog are skipped without error.

    Args:
        message: A validated A2UIMessageV09.

    Raises:
        A2UIValidationError: If any component fails catalog validation.
    """
    update = message.update_components
    if update is None:
        # Catalog validation only applies to updateComponents payloads.
        return
    collected: list[Any] = []
    for entry in update.components:
        if not isinstance(entry, dict):
            continue
        type_name = entry.get("component")
        if not isinstance(type_name, str):
            continue
        schema = _V09_BASIC_CATALOG_MODELS.get(type_name)
        if schema is None:
            continue
        try:
            schema.model_validate(entry)
        except ValidationError as exc:
            # Tag each pydantic error with the offending component.
            for err in exc.errors():
                collected.append(
                    {
                        "component_id": entry.get("id", "<unknown>"),
                        "component_type": type_name,
                        **err,
                    }
                )
    if collected:
        raise A2UIValidationError(
            f"v0.9 catalog validation failed: {len(collected)} error(s)",
            errors=collected,
        )

View File

@@ -1,269 +0,0 @@
"""Base extension interface for CrewAI A2A wrapper processing hooks.
This module defines the protocol for extending CrewAI's A2A wrapper functionality
with custom logic for tool injection, prompt augmentation, and response processing.
Note: These are CrewAI-specific processing hooks, NOT A2A protocol extensions.
A2A protocol extensions are capability declarations using AgentExtension objects
in AgentCard.capabilities.extensions, activated via the A2A-Extensions HTTP header.
See: https://a2a-protocol.org/latest/topics/extensions/
"""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING, Annotated, Any, Protocol, runtime_checkable
from pydantic import BeforeValidator
if TYPE_CHECKING:
from a2a.types import Message
from crewai.agent.core import Agent
def _validate_a2a_extension(v: Any) -> Any:
    """Validate that a value implements the A2AExtension protocol.

    Used as a pydantic ``BeforeValidator`` so config fields accept any
    object that structurally satisfies :class:`A2AExtension`.

    Args:
        v: Candidate extension object.

    Returns:
        The value unchanged when it satisfies the protocol.

    Raises:
        ValueError: If the value is missing required protocol methods.
    """
    if isinstance(v, A2AExtension):
        return v
    raise ValueError(
        f"Value must implement A2AExtension protocol. "
        f"Got {type(v).__name__} which is missing required methods."
    )


# Annotated alias for pydantic fields that must hold an A2AExtension.
ValidatedA2AExtension = Annotated[Any, BeforeValidator(_validate_a2a_extension)]
@runtime_checkable
class ConversationState(Protocol):
    """Protocol for extension-specific conversation state.

    Extensions can define their own state classes that implement this protocol
    to track conversation-specific data extracted from message history.

    ``@runtime_checkable`` enables ``isinstance`` checks; note these verify
    only that the method exists, not its signature or return type.
    """
    def is_ready(self) -> bool:
        """Check if the state indicates readiness for some action.

        Returns:
            True if the state is ready, False otherwise.
        """
        ...
@runtime_checkable
class A2AExtension(Protocol):
    """Protocol for A2A wrapper extensions.

    Extensions can implement this protocol to inject custom logic into
    the A2A conversation flow at various integration points.

    Example:
        class MyExtension:
            def inject_tools(self, agent: Agent) -> None:
                pass
            def extract_state_from_history(
                self, conversation_history: Sequence[Message]
            ) -> ConversationState | None:
                return None
            def augment_prompt(
                self, base_prompt: str, conversation_state: ConversationState | None
            ) -> str:
                return base_prompt
            def process_response(
                self, agent_response: Any, conversation_state: ConversationState | None
            ) -> Any:
                return agent_response
            def prepare_message_metadata(
                self, conversation_state: ConversationState | None
            ) -> dict[str, Any]:
                return {}
    """
    def inject_tools(self, agent: Agent) -> None:
        """Inject extension-specific tools into the agent.

        Called when an agent is wrapped with A2A capabilities. Extensions
        can add tools that enable extension-specific functionality.

        Args:
            agent: The agent instance to inject tools into.
        """
        ...
    def extract_state_from_history(
        self, conversation_history: Sequence[Message]
    ) -> ConversationState | None:
        """Extract extension-specific state from conversation history.

        Called during prompt augmentation to allow extensions to analyze
        the conversation history and extract relevant state information.

        Args:
            conversation_history: The sequence of A2A messages exchanged.

        Returns:
            Extension-specific conversation state, or None if no relevant state.
        """
        ...
    def augment_prompt(
        self,
        base_prompt: str,
        conversation_state: ConversationState | None,
    ) -> str:
        """Augment the task prompt with extension-specific instructions.

        Called during prompt augmentation to allow extensions to add
        custom instructions based on conversation state.

        Args:
            base_prompt: The base prompt to augment.
            conversation_state: Extension-specific state from extract_state_from_history.

        Returns:
            The augmented prompt with extension-specific instructions.
        """
        ...
    def process_response(
        self,
        agent_response: Any,
        conversation_state: ConversationState | None,
    ) -> Any:
        """Process and potentially modify the agent response.

        Called after parsing the agent's response, allowing extensions to
        enhance or modify the response based on conversation state.

        Args:
            agent_response: The parsed agent response.
            conversation_state: Extension-specific state from extract_state_from_history.

        Returns:
            The processed agent response (may be modified or original).
        """
        ...
    def prepare_message_metadata(
        self,
        conversation_state: ConversationState | None,
    ) -> dict[str, Any]:
        """Prepare extension-specific metadata for outbound A2A messages.

        Called when constructing A2A messages to inject extension-specific
        metadata such as client capabilities declarations.

        Args:
            conversation_state: Extension-specific state from extract_state_from_history.

        Returns:
            Dict of metadata key-value pairs to merge into the message metadata.
        """
        ...
class ExtensionRegistry:
    """Registry for managing A2A extensions.

    Maintains a collection of extensions and provides methods to invoke
    their hooks at various integration points. Hooks run in registration
    order.
    """

    def __init__(self) -> None:
        """Initialize an empty extension registry."""
        self._extensions: list[A2AExtension] = []

    def register(self, extension: A2AExtension) -> None:
        """Register an extension.

        Args:
            extension: The extension to register.
        """
        self._extensions.append(extension)

    def inject_all_tools(self, agent: Agent) -> None:
        """Inject tools from all registered extensions.

        Args:
            agent: The agent instance to inject tools into.
        """
        for ext in self._extensions:
            ext.inject_tools(agent)

    def extract_all_states(
        self, conversation_history: Sequence[Message]
    ) -> dict[type[A2AExtension], ConversationState]:
        """Extract conversation states from all registered extensions.

        Args:
            conversation_history: The sequence of A2A messages exchanged.

        Returns:
            Mapping of extension types to their conversation states.
        """
        # None states are dropped; falsy-but-not-None states are kept.
        return {
            type(ext): state
            for ext in self._extensions
            if (state := ext.extract_state_from_history(conversation_history))
            is not None
        }

    def augment_prompt_with_all(
        self,
        base_prompt: str,
        extension_states: dict[type[A2AExtension], ConversationState],
    ) -> str:
        """Augment prompt with instructions from all registered extensions.

        Args:
            base_prompt: The base prompt to augment.
            extension_states: Mapping of extension types to conversation states.

        Returns:
            The fully augmented prompt.
        """
        prompt = base_prompt
        for ext in self._extensions:
            prompt = ext.augment_prompt(prompt, extension_states.get(type(ext)))
        return prompt

    def process_response_with_all(
        self,
        agent_response: Any,
        extension_states: dict[type[A2AExtension], ConversationState],
    ) -> Any:
        """Process response through all registered extensions.

        Args:
            agent_response: The parsed agent response.
            extension_states: Mapping of extension types to conversation states.

        Returns:
            The processed agent response.
        """
        result = agent_response
        for ext in self._extensions:
            result = ext.process_response(result, extension_states.get(type(ext)))
        return result

    def prepare_all_metadata(
        self,
        extension_states: dict[type[A2AExtension], ConversationState],
    ) -> dict[str, Any]:
        """Collect metadata from all registered extensions for outbound messages.

        Args:
            extension_states: Mapping of extension types to conversation states.

        Returns:
            Merged metadata dict from all extensions; later extensions win
            on key collisions.
        """
        merged: dict[str, Any] = {}
        for ext in self._extensions:
            merged.update(ext.prepare_message_metadata(extension_states.get(type(ext))))
        return merged

View File

@@ -1,170 +0,0 @@
"""A2A Protocol extension utilities.
This module provides utilities for working with A2A protocol extensions as
defined in the A2A specification. Extensions are capability declarations in
AgentCard.capabilities.extensions using AgentExtension objects, activated
via the X-A2A-Extensions HTTP header.
See: https://a2a-protocol.org/latest/topics/extensions/
"""
from __future__ import annotations
from typing import Any
from a2a.client.middleware import ClientCallContext, ClientCallInterceptor
from a2a.extensions.common import (
HTTP_EXTENSION_HEADER,
)
from a2a.types import AgentCard, AgentExtension
from crewai.a2a.config import A2AClientConfig, A2AConfig
from crewai.a2a.extensions.base import ExtensionRegistry
def get_extensions_from_config(
    a2a_config: list[A2AConfig | A2AClientConfig] | A2AConfig | A2AClientConfig,
) -> list[str]:
    """Extract extension URIs from A2A configuration.

    Args:
        a2a_config: A2A configuration (single or list).

    Returns:
        Deduplicated list of extension URIs from all configs, in first-seen
        order.
    """
    if not isinstance(a2a_config, list):
        a2a_config = [a2a_config]
    # dict keys preserve insertion order, giving an ordered dedupe.
    ordered: dict[str, None] = {}
    for config in a2a_config:
        if isinstance(config, A2AClientConfig):
            for uri in config.extensions:
                ordered.setdefault(uri, None)
    return list(ordered)
class ExtensionsMiddleware(ClientCallInterceptor):
    """Middleware to add X-A2A-Extensions header to requests.

    This middleware adds the extensions header to all outgoing requests,
    declaring which A2A protocol extensions the client supports.
    """

    def __init__(self, extensions: list[str]) -> None:
        """Initialize with extension URIs.

        Args:
            extensions: List of extension URIs the client supports.
        """
        self._extensions = extensions

    async def intercept(
        self,
        method_name: str,
        request_payload: dict[str, Any],
        http_kwargs: dict[str, Any],
        agent_card: AgentCard | None,
        context: ClientCallContext | None,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Add extensions header to the request.

        Args:
            method_name: The A2A method being called.
            request_payload: The JSON-RPC request payload.
            http_kwargs: HTTP request kwargs (headers, etc).
            agent_card: The target agent's card.
            context: Optional call context.

        Returns:
            Tuple of (request_payload, modified_http_kwargs).
        """
        if not self._extensions:
            # Nothing to declare; pass the request through untouched.
            return request_payload, http_kwargs
        headers = http_kwargs.setdefault("headers", {})
        headers[HTTP_EXTENSION_HEADER] = ",".join(self._extensions)
        return request_payload, http_kwargs
def validate_required_extensions(
    agent_card: AgentCard,
    client_extensions: list[str] | None,
) -> list[AgentExtension]:
    """Validate that client supports all required extensions from agent.

    Args:
        agent_card: The agent's card with declared extensions.
        client_extensions: Extension URIs the client supports.

    Returns:
        List of unsupported required extensions (empty when everything
        required is supported, or the agent declares no extensions).
    """
    supported = set(client_extensions or [])
    capabilities = agent_card.capabilities
    if not capabilities or not capabilities.extensions:
        return []
    return [
        ext
        for ext in capabilities.extensions
        if ext.required and ext.uri not in supported
    ]
def create_extension_registry_from_config(
    a2a_config: list[A2AConfig | A2AClientConfig] | A2AConfig | A2AClientConfig,
) -> ExtensionRegistry:
    """Create an extension registry from A2A client configuration.

    Extracts client_extensions from each A2AClientConfig and registers them
    with the ExtensionRegistry. These extensions provide CrewAI-specific
    processing hooks (tool injection, prompt augmentation, response processing).

    Note: A2A protocol extensions (URI strings sent via X-A2A-Extensions header)
    are handled separately via get_extensions_from_config() and ExtensionsMiddleware.

    Args:
        a2a_config: A2A configuration (single or list).

    Returns:
        Extension registry with all client_extensions registered.

    Example:
        class LoggingExtension:
            def inject_tools(self, agent): pass
            def extract_state_from_history(self, history): return None
            def augment_prompt(self, prompt, state): return prompt
            def process_response(self, response, state):
                print(f"Response: {response}")
                return response
        config = A2AClientConfig(
            endpoint="https://agent.example.com",
            client_extensions=[LoggingExtension()],
        )
        registry = create_extension_registry_from_config(config)
    """
    registry = ExtensionRegistry()
    configs = a2a_config if isinstance(a2a_config, list) else [a2a_config]
    # Dedupe by object identity so a shared extension instance registers once.
    registered_ids: set[int] = set()
    for config in configs:
        if not isinstance(config, (A2AConfig, A2AClientConfig)):
            continue
        for extension in getattr(config, "client_extensions", []):
            if id(extension) in registered_ids:
                continue
            registered_ids.add(id(extension))
            registry.register(extension)
    return registry

View File

@@ -1,305 +0,0 @@
"""A2A protocol server extensions for CrewAI agents.
This module provides the base class and context for implementing A2A protocol
extensions on the server side. Extensions allow agents to offer additional
functionality beyond the core A2A specification.
See: https://a2a-protocol.org/latest/topics/extensions/
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
import logging
from typing import TYPE_CHECKING, Annotated, Any
from a2a.types import AgentExtension
from pydantic_core import CoreSchema, core_schema
if TYPE_CHECKING:
from a2a.server.context import ServerCallContext
from pydantic import GetCoreSchemaHandler
logger = logging.getLogger(__name__)
@dataclass
class ExtensionContext:
    """Context passed to extension hooks during request processing.

    Provides access to request metadata, client extensions, and shared state
    that extensions can read from and write to.

    Attributes:
        metadata: Request metadata dict, includes extension-namespaced keys.
        client_extensions: Set of extension URIs the client declared support for.
        state: Mutable dict for extensions to share data during request lifecycle.
        server_context: The underlying A2A server call context.
    """

    metadata: dict[str, Any]
    client_extensions: set[str]
    state: dict[str, Any] = field(default_factory=dict)
    server_context: ServerCallContext | None = None

    def get_extension_metadata(self, uri: str, key: str) -> Any | None:
        """Read an extension-specific metadata value.

        Extension metadata lives under namespaced keys of the form
        ``"{extension_uri}/{key}"``.

        Args:
            uri: The extension URI.
            key: The metadata key within the extension namespace.

        Returns:
            The metadata value, or None if not present.
        """
        return self.metadata.get(f"{uri}/{key}")

    def set_extension_metadata(self, uri: str, key: str, value: Any) -> None:
        """Write an extension-specific metadata value.

        Args:
            uri: The extension URI.
            key: The metadata key within the extension namespace.
            value: The value to set.
        """
        self.metadata[f"{uri}/{key}"] = value
class ServerExtension(ABC):
    """Base class for A2A protocol server extensions.

    Subclass this to create custom extensions that modify agent behavior
    when clients activate them. Extensions are identified by URI and can
    be marked as required.

    Example:
        class SamplingExtension(ServerExtension):
            uri = "urn:crewai:ext:sampling/v1"
            required = True

            def __init__(self, max_tokens: int = 4096):
                self.max_tokens = max_tokens

            @property
            def params(self) -> dict[str, Any]:
                return {"max_tokens": self.max_tokens}

            async def on_request(self, context: ExtensionContext) -> None:
                limit = context.get_extension_metadata(self.uri, "limit")
                if limit:
                    context.state["token_limit"] = int(limit)

            async def on_response(self, context: ExtensionContext, result: Any) -> Any:
                return result
    """

    # Subclasses set these as plain class attributes; the Annotated metadata
    # strings document each field for tooling.
    uri: Annotated[str, "Extension URI identifier. Must be unique."]
    required: Annotated[bool, "Whether clients must support this extension."] = False
    description: Annotated[
        str | None, "Human-readable description of the extension."
    ] = None

    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        _source_type: Any,
        _handler: GetCoreSchemaHandler,
    ) -> CoreSchema:
        """Tell Pydantic how to validate ServerExtension instances."""
        # Accept any instance of this class; no coercion or field validation.
        return core_schema.is_instance_schema(cls)

    @property
    def params(self) -> dict[str, Any] | None:
        """Extension parameters to advertise in AgentCard.

        Override this property to expose configuration that clients can read.

        Returns:
            Dict of parameter names to values, or None.
        """
        return None

    def agent_extension(self) -> AgentExtension:
        """Generate the AgentExtension object for the AgentCard.

        Returns:
            AgentExtension with this extension's URI, required flag, and params.
        """
        return AgentExtension(
            uri=self.uri,
            # Emit None instead of False so the serialized card stays minimal.
            required=self.required if self.required else None,
            description=self.description,
            params=self.params,
        )

    def is_active(self, context: ExtensionContext) -> bool:
        """Check if this extension is active for the current request.

        An extension is active if the client declared support for it.

        Args:
            context: The extension context for the current request.

        Returns:
            True if the client supports this extension.
        """
        return self.uri in context.client_extensions

    @abstractmethod
    async def on_request(self, context: ExtensionContext) -> None:
        """Called before agent execution if extension is active.

        Use this hook to:
        - Read extension-specific metadata from the request
        - Set up state for the execution
        - Modify execution parameters via context.state

        Args:
            context: The extension context with request metadata and state.
        """
        ...

    @abstractmethod
    async def on_response(self, context: ExtensionContext, result: Any) -> Any:
        """Called after agent execution if extension is active.

        Use this hook to:
        - Modify or enhance the result
        - Add extension-specific metadata to the response
        - Clean up any resources

        Args:
            context: The extension context with request metadata and state.
            result: The agent execution result.

        Returns:
            The result, potentially modified.
        """
        ...
class ServerExtensionRegistry:
    """Registry for managing server-side A2A protocol extensions.

    Collects extensions and provides methods to generate AgentCapabilities
    and invoke extension hooks during request processing.
    """

    def __init__(self, extensions: list[ServerExtension] | None = None) -> None:
        """Initialize the registry with optional extensions.

        Args:
            extensions: Initial list of extensions to register.
        """
        # Ordered list drives hook invocation order; dict gives URI lookup.
        self._extensions: list[ServerExtension] = list(extensions) if extensions else []
        self._by_uri: dict[str, ServerExtension] = {
            ext.uri: ext for ext in self._extensions
        }

    def register(self, extension: ServerExtension) -> None:
        """Register an extension.

        Args:
            extension: The extension to register.

        Raises:
            ValueError: If an extension with the same URI is already registered.
        """
        if extension.uri in self._by_uri:
            raise ValueError(f"Extension already registered: {extension.uri}")
        self._extensions.append(extension)
        self._by_uri[extension.uri] = extension

    def get_agent_extensions(self) -> list[AgentExtension]:
        """Get AgentExtension objects for all registered extensions.

        Returns:
            List of AgentExtension objects for the AgentCard.
        """
        return [ext.agent_extension() for ext in self._extensions]

    def get_extension(self, uri: str) -> ServerExtension | None:
        """Get an extension by URI.

        Args:
            uri: The extension URI.

        Returns:
            The extension, or None if not found.
        """
        return self._by_uri.get(uri)

    @staticmethod
    def create_context(
        metadata: dict[str, Any],
        client_extensions: set[str],
        server_context: ServerCallContext | None = None,
    ) -> ExtensionContext:
        """Create an ExtensionContext for a request.

        Args:
            metadata: Request metadata dict.
            client_extensions: Set of extension URIs from client.
            server_context: Optional server call context.

        Returns:
            ExtensionContext for use in hooks.
        """
        return ExtensionContext(
            metadata=metadata,
            client_extensions=client_extensions,
            server_context=server_context,
        )

    async def invoke_on_request(self, context: ExtensionContext) -> None:
        """Invoke on_request hooks for all active extensions.

        Tracks activated extensions and isolates errors from individual hooks.

        Args:
            context: The extension context for the request.
        """
        for extension in self._extensions:
            if extension.is_active(context):
                try:
                    await extension.on_request(context)
                    # Record activation only after the hook succeeds.
                    if context.server_context is not None:
                        context.server_context.activated_extensions.add(extension.uri)
                except Exception:
                    # One failing extension must not break the whole request.
                    logger.exception(
                        "Extension on_request hook failed",
                        extra={"extension": extension.uri},
                    )

    async def invoke_on_response(self, context: ExtensionContext, result: Any) -> Any:
        """Invoke on_response hooks for all active extensions.

        Isolates errors from individual hooks to prevent one failing extension
        from breaking the entire response.

        Args:
            context: The extension context for the request.
            result: The agent execution result.

        Returns:
            The result after all extensions have processed it.
        """
        processed = result
        for extension in self._extensions:
            if extension.is_active(context):
                try:
                    processed = await extension.on_response(context, processed)
                except Exception:
                    # On failure, keep the last successfully processed value.
                    logger.exception(
                        "Extension on_response hook failed",
                        extra={"extension": extension.uri},
                    )
        return processed

View File

@@ -1,480 +0,0 @@
"""Helper functions for processing A2A task results."""
from __future__ import annotations
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
Task,
TaskArtifactUpdateEvent,
TaskState,
TaskStatusUpdateEvent,
TextPart,
)
from typing_extensions import NotRequired, TypedDict
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
# Shape of events yielded by client.send_message(): either a (task, update)
# tuple or a bare Message when the agent replies synchronously.
SendMessageEvent = (
    tuple[Task, TaskStatusUpdateEvent | TaskArtifactUpdateEvent | None] | Message
)

# States from which a task never transitions again.
TERMINAL_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.completed,
        TaskState.failed,
        TaskState.rejected,
        TaskState.canceled,
    }
)

# States that need caller action (more input or authentication) to proceed.
ACTIONABLE_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.input_required,
        TaskState.auth_required,
    }
)

# States meaning the remote agent is still working; keep polling/streaming.
PENDING_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.submitted,
        TaskState.working,
    }
)
class TaskStateResult(TypedDict):
    """Result dictionary from processing A2A task state."""

    # Final A2A task state (completed/failed/input_required/...).
    status: TaskState
    # Message history accumulated during the exchange.
    history: list[Message]
    # Joined text output; set on successful completion.
    result: NotRequired[str]
    # Human-readable error; set on failed/actionable states.
    error: NotRequired[str]
    # Serialized AgentCard of the remote agent (model_dump output).
    agent_card: NotRequired[dict[str, Any]]
    # Remote agent display name, if known.
    a2a_agent_name: NotRequired[str | None]
def extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
    """Extract result text from an A2A task's status, history, and artifacts.

    Precedence: text parts on the status message win; otherwise the most
    recent agent message in history is used. Artifact text is always
    appended afterwards.

    Args:
        a2a_task: A2A Task object with status, history, and artifacts

    Returns:
        List of result text parts
    """
    parts: list[str] = []

    status = a2a_task.status
    if status and status.message:
        for part in status.message.parts:
            if part.root.kind == "text":
                parts.append(part.root.text)

    # Fallback: only consulted when the status message yielded nothing.
    if not parts and a2a_task.history:
        for message in reversed(a2a_task.history):
            if message.role != Role.agent:
                continue
            parts.extend(
                part.root.text for part in message.parts if part.root.kind == "text"
            )
            break

    for artifact in a2a_task.artifacts or []:
        parts.extend(
            part.root.text for part in artifact.parts if part.root.kind == "text"
        )

    return parts
def extract_error_message(a2a_task: A2ATask, default: str) -> str:
    """Extract an error message from an A2A task.

    Args:
        a2a_task: A2A Task object
        default: Default message if no error found

    Returns:
        Error message string
    """
    status = a2a_task.status
    message = status.message if status else None
    if message:
        sentinel = object()
        text = next(
            (part.root.text for part in message.parts if part.root.kind == "text"),
            sentinel,
        )
        # With no text part, fall back to the message's string form.
        return str(text) if text is not sentinel else str(message)

    for history_msg in reversed(a2a_task.history or []):
        for part in history_msg.parts:
            if part.root.kind == "text":
                return str(part.root.text)

    return default
def process_task_state(
    a2a_task: A2ATask,
    new_messages: list[Message],
    agent_card: AgentCard,
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    result_parts: list[str] | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    is_final: bool = True,
) -> TaskStateResult | None:
    """Process A2A task state and return result dictionary.

    Shared logic for both polling and streaming handlers.

    Args:
        a2a_task: The A2A task to process.
        new_messages: List to collect messages (modified in place).
        agent_card: The agent card.
        turn_number: Current turn number.
        is_multiturn: Whether multi-turn conversation.
        agent_role: Agent role for logging.
        result_parts: Accumulated result parts (streaming passes accumulated,
            polling passes None to extract from task).
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        is_final: Whether this is the final response in the stream.

    Returns:
        Result dictionary if terminal/actionable state, None otherwise.
    """
    if result_parts is None:
        result_parts = []
    # completed: join text parts, emit the response event, return success.
    if a2a_task.status.state == TaskState.completed:
        if not result_parts:
            extracted_parts = extract_task_result_parts(a2a_task)
            result_parts.extend(extracted_parts)
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        response_text = " ".join(result_parts) if result_parts else ""
        message_id = None
        if a2a_task.status and a2a_task.status.message:
            message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=message_id,
                is_multiturn=is_multiturn,
                status="completed",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.completed,
            agent_card=agent_card.model_dump(exclude_none=True),
            result=response_text,
            history=new_messages,
        )
    # input_required: surface the agent's prompt and let the caller reply.
    if a2a_task.status.state == TaskState.input_required:
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        response_text = extract_error_message(a2a_task, "Additional input required")
        # Without history, synthesize an agent message so the prompt is
        # preserved in the conversation record.
        if response_text and not a2a_task.history:
            agent_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=response_text))],
                context_id=a2a_task.context_id,
                task_id=a2a_task.id,
            )
            new_messages.append(agent_message)
        input_message_id = None
        if a2a_task.status and a2a_task.status.message:
            input_message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=input_message_id,
                is_multiturn=is_multiturn,
                status="input_required",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.input_required,
            error=response_text,
            history=new_messages,
            agent_card=agent_card.model_dump(exclude_none=True),
        )
    # failed/rejected: both map to a failed result with an error message.
    if a2a_task.status.state in {TaskState.failed, TaskState.rejected}:
        error_msg = extract_error_message(a2a_task, "Task failed without error message")
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    if a2a_task.status.state == TaskState.auth_required:
        error_msg = extract_error_message(a2a_task, "Authentication required")
        return TaskStateResult(
            status=TaskState.auth_required,
            error=error_msg,
            history=new_messages,
        )
    if a2a_task.status.state == TaskState.canceled:
        error_msg = extract_error_message(a2a_task, "Task was canceled")
        return TaskStateResult(
            status=TaskState.canceled,
            error=error_msg,
            history=new_messages,
        )
    # Pending (and any unrecognized) states: signal "keep waiting".
    if a2a_task.status.state in PENDING_STATES:
        return None
    return None
async def send_message_and_get_task_id(
    event_stream: AsyncIterator[SendMessageEvent],
    new_messages: list[Message],
    agent_card: AgentCard,
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    context_id: str | None = None,
) -> str | TaskStateResult:
    """Send message and process initial response.

    Handles the common pattern of sending a message and either:
    - Getting an immediate Message response (task completed synchronously)
    - Getting a Task that needs polling/waiting for completion

    Args:
        event_stream: Async iterator from client.send_message()
        new_messages: List to collect messages (modified in place)
        agent_card: The agent card
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        endpoint: Optional A2A endpoint URL.
        a2a_agent_name: Optional A2A agent name.
        context_id: Optional A2A context ID for correlation.

    Returns:
        Task ID string if agent needs polling/waiting, or TaskStateResult if done.
    """
    try:
        async for event in event_stream:
            # Case 1: synchronous Message reply - no task to wait on.
            if isinstance(event, Message):
                new_messages.append(event)
                result_parts = [
                    part.root.text for part in event.parts if part.root.kind == "text"
                ]
                response_text = " ".join(result_parts) if result_parts else ""
                crewai_event_bus.emit(
                    None,
                    A2AResponseReceivedEvent(
                        response=response_text,
                        turn_number=turn_number,
                        context_id=event.context_id,
                        message_id=event.message_id,
                        is_multiturn=is_multiturn,
                        status="completed",
                        final=True,
                        agent_role=agent_role,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        from_task=from_task,
                        from_agent=from_agent,
                    ),
                )
                return TaskStateResult(
                    status=TaskState.completed,
                    result=response_text,
                    history=new_messages,
                    agent_card=agent_card.model_dump(exclude_none=True),
                )
            # Case 2: (task, update) tuple - return the result if already
            # terminal/actionable, otherwise hand back the task ID to poll.
            if isinstance(event, tuple):
                a2a_task, _ = event
                if a2a_task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
                    result = process_task_state(
                        a2a_task=a2a_task,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        turn_number=turn_number,
                        is_multiturn=is_multiturn,
                        agent_role=agent_role,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        from_task=from_task,
                        from_agent=from_agent,
                    )
                    if result:
                        return result
                return a2a_task.id
        # Stream ended without producing a Message or a Task.
        return TaskStateResult(
            status=TaskState.failed,
            error="No task ID received from initial message",
            history=new_messages,
        )
    except A2AClientHTTPError as e:
        # Record the HTTP failure in the history and emit both error events
        # before returning a failed result.
        error_msg = f"HTTP Error {e.status_code}: {e!s}"
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="http_error",
                status_code=e.status_code,
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    except Exception as e:
        # Same handling for any other failure, tagged as unexpected.
        error_msg = f"Unexpected error during send_message: {e!s}"
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="unexpected_error",
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    finally:
        # Close the generator on every path so the underlying connection is
        # released even when we return early with a task ID.
        aclose = getattr(event_stream, "aclose", None)
        if aclose:
            await aclose()

View File

@@ -1,55 +0,0 @@
"""String templates for A2A (Agent-to-Agent) protocol messaging and status."""
from string import Template
from typing import Final
# Injected into prompts to list the remote A2A agents available for delegation.
AVAILABLE_AGENTS_TEMPLATE: Final[Template] = Template(
    "\n<AVAILABLE_A2A_AGENTS>\n $available_a2a_agents\n</AVAILABLE_A2A_AGENTS>\n"
)
# Replays prior A2A exchange history for context on follow-up turns.
PREVIOUS_A2A_CONVERSATION_TEMPLATE: Final[Template] = Template(
    "\n<PREVIOUS_A2A_CONVERSATION>\n"
    " $previous_a2a_conversation"
    "\n</PREVIOUS_A2A_CONVERSATION>\n"
)
# Reports turn progress plus an optional warning during a conversation.
CONVERSATION_TURN_INFO_TEMPLATE: Final[Template] = Template(
    "\n<CONVERSATION_PROGRESS>\n"
    ' turn="$turn_count"\n'
    ' max_turns="$max_turns"\n'
    " $warning"
    "\n</CONVERSATION_PROGRESS>\n"
)
# Shown when configured A2A agents could not be reached.
UNAVAILABLE_AGENTS_NOTICE_TEMPLATE: Final[Template] = Template(
    "\n<A2A_AGENTS_STATUS>\n"
    " NOTE: A2A agents were configured but are currently unavailable.\n"
    " You cannot delegate to remote agents for this task.\n\n"
    " Unavailable Agents:\n"
    " $unavailable_agents"
    "\n</A2A_AGENTS_STATUS>\n"
)
# Static notice instructing the LLM that the remote task finished and it must
# answer from history instead of delegating again.
REMOTE_AGENT_COMPLETED_NOTICE: Final[str] = """
<REMOTE_AGENT_STATUS>
STATUS: COMPLETED
The remote agent has finished processing your request. Their response is in the conversation history above.
You MUST now:
1. Extract the answer from the conversation history
2. Set is_a2a=false
3. Return the answer as your final message
DO NOT send another request - the task is already done.
</REMOTE_AGENT_STATUS>
"""
# Static notice instructing the LLM to answer the user itself, in third
# person, using the remote agent's reply.
REMOTE_AGENT_RESPONSE_NOTICE: Final[str] = """
<REMOTE_AGENT_STATUS>
STATUS: RESPONSE_RECEIVED
The remote agent has responded. Their response is in the conversation history above.
You MUST now:
1. Set is_a2a=false (the remote task is complete and cannot receive more messages)
2. Provide YOUR OWN response to the original task based on the information received
IMPORTANT: Your response should be addressed to the USER who gave you the original task.
Report what the remote agent told you in THIRD PERSON (e.g., "The remote agent said..." or "I learned that...").
Do NOT address the remote agent directly or use "you" to refer to them.
</REMOTE_AGENT_STATUS>
"""

View File

@@ -1,103 +0,0 @@
"""Type definitions for A2A protocol message parts."""
from __future__ import annotations
from typing import (
Annotated,
Any,
Literal,
Protocol,
runtime_checkable,
)
from pydantic import BeforeValidator, HttpUrl, TypeAdapter
from typing_extensions import NotRequired, TypedDict
# If the updates subpackage cannot be imported, alias its names to ``Any`` so
# the type definitions below still resolve and this module stays importable.
try:
    from crewai.a2a.updates import (
        PollingConfig,
        PollingHandler,
        PushNotificationConfig,
        PushNotificationHandler,
        StreamingConfig,
        StreamingHandler,
        UpdateConfig,
    )
except ImportError:
    PollingConfig = Any  # type: ignore[misc,assignment]
    PollingHandler = Any  # type: ignore[misc,assignment]
    PushNotificationConfig = Any  # type: ignore[misc,assignment]
    PushNotificationHandler = Any  # type: ignore[misc,assignment]
    StreamingConfig = Any  # type: ignore[misc,assignment]
    StreamingHandler = Any  # type: ignore[misc,assignment]
    UpdateConfig = Any  # type: ignore[misc,assignment]
# Known A2A transport identifiers.
TransportType = Literal["JSONRPC", "GRPC", "HTTP+JSON"]

# A2A protocol versions recognized by this integration.
ProtocolVersion = Literal[
    "0.2.0",
    "0.2.1",
    "0.2.2",
    "0.2.3",
    "0.2.4",
    "0.2.5",
    "0.2.6",
    "0.3.0",
    "0.4.0",
]

# Shared adapter so URL validation is compiled once at import time.
http_url_adapter: TypeAdapter[HttpUrl] = TypeAdapter(HttpUrl)

# A ``str`` that must parse as a strict HTTP(S) URL; the validated value is
# normalized back to a plain string.
Url = Annotated[
    str,
    BeforeValidator(
        lambda value: str(http_url_adapter.validate_python(value, strict=True))
    ),
]
@runtime_checkable
class AgentResponseProtocol(Protocol):
    """Protocol for the dynamically created AgentResponse model."""

    # IDs of the A2A agents targeted by this response.
    a2a_ids: tuple[str, ...]
    # The message text to send (or the final answer).
    message: str
    # True while the conversation should continue via A2A delegation.
    is_a2a: bool
class PartsMetadataDict(TypedDict, total=False):
    """Metadata for A2A message parts.

    Attributes:
        mimeType: MIME type for the part content.
        schema: JSON schema for the part content.
    """

    # Only JSON parts carry metadata here, hence the single allowed value.
    mimeType: Literal["application/json"]
    schema: dict[str, Any]
class PartsDict(TypedDict):
    """A2A message part containing text and optional metadata.

    Attributes:
        text: The text content of the message part.
        metadata: Optional metadata describing the part content.
    """

    text: str
    metadata: NotRequired[PartsMetadataDict]
# Class (not instance) aliases for each update-mechanism handler.
PollingHandlerType = type[PollingHandler]
StreamingHandlerType = type[StreamingHandler]
PushNotificationHandlerType = type[PushNotificationHandler]
HandlerType = PollingHandlerType | StreamingHandlerType | PushNotificationHandlerType

# Maps each update config type to the handler class that executes it.
HANDLER_REGISTRY: dict[type[UpdateConfig], HandlerType] = {
    PollingConfig: PollingHandler,
    StreamingConfig: StreamingHandler,
    PushNotificationConfig: PushNotificationHandler,
}

View File

@@ -1,35 +0,0 @@
"""A2A update mechanism configuration types."""
from crewai.a2a.updates.base import (
BaseHandlerKwargs,
PollingHandlerKwargs,
PushNotificationHandlerKwargs,
PushNotificationResultStore,
StreamingHandlerKwargs,
UpdateHandler,
)
from crewai.a2a.updates.polling.config import PollingConfig
from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler
from crewai.a2a.updates.streaming.config import StreamingConfig
from crewai.a2a.updates.streaming.handler import StreamingHandler
# Union of all supported update-mechanism configurations.
UpdateConfig = PollingConfig | StreamingConfig | PushNotificationConfig

__all__ = [
    "BaseHandlerKwargs",
    "PollingConfig",
    "PollingHandler",
    "PollingHandlerKwargs",
    "PushNotificationConfig",
    "PushNotificationHandler",
    "PushNotificationHandlerKwargs",
    "PushNotificationResultStore",
    "StreamingConfig",
    "StreamingHandler",
    "StreamingHandlerKwargs",
    "UpdateConfig",
    "UpdateHandler",
]

View File

@@ -1,177 +0,0 @@
"""Base types for A2A update mechanism handlers."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, NamedTuple, Protocol
from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema
from typing_extensions import TypedDict
class CommonParams(NamedTuple):
    """Common parameters shared across all update handlers.

    Groups the frequently-passed parameters to reduce duplication.
    """

    turn_number: int  # conversation turn counter (0 when not provided)
    is_multiturn: bool  # whether this exchange is part of a multi-turn conversation
    agent_role: str | None  # local agent role, used for logging/events
    endpoint: str  # remote A2A agent endpoint URL (required)
    a2a_agent_name: str | None  # remote agent display name, if known
    context_id: str | None  # A2A context ID for correlation
    from_task: Any  # originating CrewAI task (event metadata)
    from_agent: Any  # originating CrewAI agent (event metadata)
if TYPE_CHECKING:
from a2a.client import Client
from a2a.types import AgentCard, Message, Task
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
class BaseHandlerKwargs(TypedDict, total=False):
    """Base kwargs shared by all handlers."""

    turn_number: int  # conversation turn counter
    is_multiturn: bool  # whether part of a multi-turn conversation
    agent_role: str | None  # local agent role, for logging/events
    context_id: str | None  # A2A context ID for correlation
    task_id: str | None  # A2A task ID, when already known
    endpoint: str | None  # remote agent endpoint URL
    agent_branch: Any  # agent tree branch used as the event source
    a2a_agent_name: str | None  # remote agent display name
    from_task: Any  # originating CrewAI task (event metadata)
    from_agent: Any  # originating CrewAI agent (event metadata)
class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for polling handler."""

    polling_interval: float  # seconds between poll attempts
    polling_timeout: float  # max seconds to poll before timing out
    history_length: int  # messages to retrieve per poll
    max_polls: int | None  # max poll attempts (None = unlimited)
class StreamingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for streaming handler (no keys beyond the base set)."""
class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for push notification handler."""

    config: PushNotificationConfig  # push notification configuration
    result_store: PushNotificationResultStore  # store used to await task results
    polling_timeout: float  # max seconds to wait for a stored result
    polling_interval: float  # seconds between result-store checks
class PushNotificationResultStore(Protocol):
    """Protocol for storing and retrieving push notification results.

    This protocol defines the interface for a result store that the
    PushNotificationHandler uses to wait for task completion.
    """

    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        _source_type: Any,
        _handler: GetCoreSchemaHandler,
    ) -> CoreSchema:
        # Treat implementations as opaque values: accept anything, no validation.
        return core_schema.any_schema()

    async def wait_for_result(
        self,
        task_id: str,
        timeout: float,
        poll_interval: float = 1.0,
    ) -> Task | None:
        """Wait for a task result to be available.

        Args:
            task_id: The task ID to wait for.
            timeout: Max seconds to wait before returning None.
            poll_interval: Seconds between polling attempts.

        Returns:
            The completed Task object, or None if timeout.
        """
        ...

    async def get_result(self, task_id: str) -> Task | None:
        """Get a task result if available.

        Args:
            task_id: The task ID to retrieve.

        Returns:
            The Task object if available, None otherwise.
        """
        ...

    async def store_result(self, task: Task) -> None:
        """Store a task result.

        Args:
            task: The Task object to store.
        """
        ...
class UpdateHandler(Protocol):
    """Protocol for A2A update mechanism handlers.

    Implemented by the polling, streaming, and push-notification handlers;
    all expose a single static ``execute`` entry point.
    """

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Any,
    ) -> TaskStateResult:
        """Execute the update mechanism and return result.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages (modified in place).
            agent_card: The agent card.
            **kwargs: Additional handler-specific parameters.

        Returns:
            Result dictionary with status, result/error, and history.
        """
        ...
def extract_common_params(kwargs: BaseHandlerKwargs) -> CommonParams:
    """Extract common parameters from handler kwargs.

    Args:
        kwargs: Handler kwargs dict.

    Returns:
        CommonParams with extracted values.

    Raises:
        ValueError: If endpoint is not provided.
    """
    if kwargs.get("endpoint") is None:
        raise ValueError("endpoint is required for update handlers")
    # kwargs keys mirror the CommonParams field names, so build the tuple
    # directly from _fields; only these two fields have non-None defaults.
    defaults: dict[str, Any] = {"turn_number": 0, "is_multiturn": False}
    values = {
        name: kwargs.get(name, defaults.get(name)) for name in CommonParams._fields
    }
    return CommonParams(**values)

View File

@@ -1 +0,0 @@
"""Polling update mechanism module."""

View File

@@ -1,25 +0,0 @@
"""Polling update mechanism configuration."""
from __future__ import annotations
from pydantic import BaseModel, Field
class PollingConfig(BaseModel):
    """Configuration for polling-based task updates.

    Attributes:
        interval: Seconds between poll attempts.
        timeout: Max seconds to poll before raising timeout error.
        max_polls: Max number of poll attempts.
        history_length: Number of messages to retrieve per poll.
    """

    interval: float = Field(
        default=2.0, gt=0, description="Seconds between poll attempts"
    )
    # None = no config-level limit; presumably the handler supplies its own
    # default — TODO confirm against the polling handler.
    timeout: float | None = Field(default=None, gt=0, description="Max seconds to poll")
    max_polls: int | None = Field(default=None, gt=0, description="Max poll attempts")
    history_length: int = Field(
        default=100, gt=0, description="Messages to retrieve per poll"
    )

View File

@@ -1,359 +0,0 @@
"""Polling update mechanism handler."""
from __future__ import annotations
import asyncio
import time
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskQueryParams,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.errors import A2APollingTimeoutError
from crewai.a2a.task_helpers import (
ACTIONABLE_STATES,
TERMINAL_STATES,
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import PollingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2APollingStartedEvent,
A2APollingStatusEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
async def _poll_task_until_complete(
    client: Client,
    task_id: str,
    polling_interval: float,
    polling_timeout: float,
    agent_branch: Any | None = None,
    history_length: int = 100,
    max_polls: int | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask:
    """Repeatedly fetch a task until it settles in a terminal or actionable state.

    Args:
        client: A2A client instance.
        task_id: Task ID to poll.
        polling_interval: Seconds between poll attempts.
        polling_timeout: Max seconds before timeout.
        agent_branch: Agent tree branch for logging.
        history_length: Number of messages to retrieve per poll.
        max_polls: Max number of poll attempts (None = unlimited).
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.

    Returns:
        Final task object in terminal state.

    Raises:
        A2APollingTimeoutError: If polling exceeds timeout or max_polls.
    """
    started = time.monotonic()
    polls = 0
    while True:
        polls += 1
        snapshot = await client.get_task(
            TaskQueryParams(id=task_id, history_length=history_length)
        )
        waited = time.monotonic() - started
        ctx_id = snapshot.context_id or context_id
        # Surface every poll attempt as an observability event.
        crewai_event_bus.emit(
            agent_branch,
            A2APollingStatusEvent(
                task_id=task_id,
                context_id=ctx_id,
                state=str(snapshot.status.state.value),
                elapsed_seconds=waited,
                poll_count=polls,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        # Stop as soon as the remote task needs no further polling.
        if snapshot.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
            return snapshot
        if waited > polling_timeout:
            raise A2APollingTimeoutError(
                f"Polling timeout after {polling_timeout}s ({polls} polls)"
            )
        # A falsy max_polls (None or 0) means "no attempt limit".
        if max_polls and polls >= max_polls:
            raise A2APollingTimeoutError(
                f"Max polls ({max_polls}) exceeded after {waited:.1f}s"
            )
        await asyncio.sleep(polling_interval)
class PollingHandler:
    """Polling-based update handler."""

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[PollingHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using polling for updates.

        Sends ``message``, then polls the resulting task until it reaches a
        terminal/actionable state, a timeout, or the max-poll limit.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages (mutated in place).
            agent_card: The agent card.
            **kwargs: Polling-specific parameters.

        Returns:
            TaskStateResult with status, result/error, and history.
        """
        polling_interval = kwargs.get("polling_interval", 2.0)
        polling_timeout = kwargs.get("polling_timeout", 300.0)
        endpoint = kwargs.get("endpoint", "")
        agent_branch = kwargs.get("agent_branch")
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")
        history_length = kwargs.get("history_length", 100)
        max_polls = kwargs.get("max_polls")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")
        try:
            # May return either a final result (fast path) or a task id string
            # that must be polled to completion.
            result_or_task_id = await send_message_and_get_task_id(
                event_stream=client.send_message(message),
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                context_id=context_id,
            )
            if not isinstance(result_or_task_id, str):
                return result_or_task_id
            task_id = result_or_task_id
            crewai_event_bus.emit(
                agent_branch,
                A2APollingStartedEvent(
                    task_id=task_id,
                    context_id=context_id,
                    polling_interval=polling_interval,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            final_task = await _poll_task_until_complete(
                client=client,
                task_id=task_id,
                polling_interval=polling_interval,
                polling_timeout=polling_timeout,
                agent_branch=agent_branch,
                history_length=history_length,
                max_polls=max_polls,
                from_task=from_task,
                from_agent=from_agent,
                context_id=context_id,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            )
            result = process_task_state(
                a2a_task=final_task,
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result
            # Poll loop returned a state process_task_state does not map.
            return TaskStateResult(
                status=TaskState.failed,
                error=f"Unexpected task state: {final_task.status.state}",
                history=new_messages,
            )
        except A2APollingTimeoutError as e:
            # Timeout: record failure in history and emit a failed response event.
            error_msg = str(e)
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        except A2AClientHTTPError as e:
            # Transport-level failure: emit both a connection-error event and a
            # failed response event, in that order.
            error_msg = f"HTTP Error {e.status_code}: {e!s}"
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        except Exception as e:
            # Catch-all: same shape as the HTTP branch, but without status_code.
            error_msg = f"Unexpected error during polling: {e!s}"
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

View File

@@ -1 +0,0 @@
"""Push notification update mechanism module."""

View File

@@ -1,65 +0,0 @@
"""Push notification update mechanism configuration."""
from __future__ import annotations
from typing import Annotated
from a2a.types import PushNotificationAuthenticationInfo
from pydantic import AnyHttpUrl, BaseModel, BeforeValidator, Field
from crewai.a2a.updates.base import PushNotificationResultStore
from crewai.a2a.updates.push_notifications.signature import WebhookSignatureConfig
def _coerce_signature(
    value: str | WebhookSignatureConfig | None,
) -> WebhookSignatureConfig | None:
    """Normalize a signature input: bare secret strings become full configs."""
    if isinstance(value, str):
        # A plain string is shorthand for HMAC-SHA256 with default settings.
        return WebhookSignatureConfig.hmac_sha256(secret=value)
    # Already a config object, or None (meaning "no signature").
    return value


SignatureInput = Annotated[
    WebhookSignatureConfig | None,
    BeforeValidator(_coerce_signature),
]
class PushNotificationConfig(BaseModel):
    """Settings for webhook-driven task updates.

    Attributes:
        url: Callback URL the remote agent posts notifications to.
        id: Unique identifier for this config.
        token: Token used to validate incoming notifications.
        authentication: Credentials the agent uses when calling the webhook.
        timeout: Max seconds to wait for task completion.
        interval: Seconds between result polling attempts.
        result_store: Store that receives push notification results.
        signature: HMAC signature config; a plain string (the secret) selects
            the default HMAC-SHA256 settings.
    """

    url: AnyHttpUrl = Field(description="Callback URL for push notifications")
    id: str | None = Field(default=None, description="Unique config identifier")
    token: str | None = Field(default=None, description="Validation token")
    authentication: PushNotificationAuthenticationInfo | None = Field(
        default=None,
        description="Auth info for agent to use when calling webhook",
    )
    timeout: float | None = Field(
        default=300.0,
        gt=0,
        description="Max seconds to wait for task completion",
    )
    interval: float = Field(
        default=2.0,
        gt=0,
        description="Seconds between result polling attempts",
    )
    result_store: PushNotificationResultStore | None = Field(
        default=None,
        description="Result store for push notification handling",
    )
    signature: SignatureInput = Field(
        default=None,
        description="HMAC signature config. Pass a string (secret) for simple usage, "
        "or WebhookSignatureConfig for custom headers/tolerance.",
    )

View File

@@ -1,354 +0,0 @@
"""Push notification (webhook) update mechanism handler."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import (
CommonParams,
PushNotificationHandlerKwargs,
PushNotificationResultStore,
extract_common_params,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2APushNotificationRegisteredEvent,
A2APushNotificationTimeoutEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
logger = logging.getLogger(__name__)
def _handle_push_error(
    error: Exception,
    error_msg: str,
    error_type: str,
    new_messages: list[Message],
    agent_branch: Any | None,
    params: CommonParams,
    task_id: str | None,
    status_code: int | None = None,
) -> TaskStateResult:
    """Record a push-notification failure and emit the standard error events.

    Args:
        error: The exception that occurred.
        error_msg: Formatted error message for the result.
        error_type: Type of error for the event.
        new_messages: Message history to append the failure note to.
        agent_branch: Agent tree branch for events.
        params: Common handler parameters.
        task_id: A2A task ID.
        status_code: HTTP status code if applicable.

    Returns:
        TaskStateResult with failed status.
    """
    # Keep the failure visible in the conversation history.
    failure_note = Message(
        role=Role.agent,
        message_id=str(uuid.uuid4()),
        parts=[Part(root=TextPart(text=error_msg))],
        context_id=params.context_id,
        task_id=task_id,
    )
    new_messages.append(failure_note)
    connection_event = A2AConnectionErrorEvent(
        endpoint=params.endpoint,
        error=str(error),
        error_type=error_type,
        status_code=status_code,
        a2a_agent_name=params.a2a_agent_name,
        operation="push_notification",
        context_id=params.context_id,
        task_id=task_id,
        from_task=params.from_task,
        from_agent=params.from_agent,
    )
    response_event = A2AResponseReceivedEvent(
        response=error_msg,
        turn_number=params.turn_number,
        context_id=params.context_id,
        is_multiturn=params.is_multiturn,
        status="failed",
        final=True,
        agent_role=params.agent_role,
        endpoint=params.endpoint,
        a2a_agent_name=params.a2a_agent_name,
        from_task=params.from_task,
        from_agent=params.from_agent,
    )
    # Emit connection-error first, then the failed-response event.
    crewai_event_bus.emit(agent_branch, connection_event)
    crewai_event_bus.emit(agent_branch, response_event)
    return TaskStateResult(
        status=TaskState.failed,
        error=error_msg,
        history=new_messages,
    )
async def _wait_for_push_result(
    task_id: str,
    result_store: PushNotificationResultStore,
    timeout: float,
    poll_interval: float,
    agent_branch: Any | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask | None:
    """Block until the webhook result store yields a task or the timeout lapses.

    Args:
        task_id: Task ID to wait for.
        result_store: Store to retrieve results from.
        timeout: Max seconds to wait.
        poll_interval: Seconds between polling attempts.
        agent_branch: Agent tree branch for logging.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent.

    Returns:
        Final task object, or None on timeout.
    """
    outcome = await result_store.wait_for_result(
        task_id=task_id,
        timeout=timeout,
        poll_interval=poll_interval,
    )
    if outcome is not None:
        return outcome
    # No webhook arrived in time -- record the timeout for observability.
    crewai_event_bus.emit(
        agent_branch,
        A2APushNotificationTimeoutEvent(
            task_id=task_id,
            context_id=context_id,
            timeout_seconds=timeout,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )
    return None
class PushNotificationHandler:
    """Push notification (webhook) based update handler."""

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[PushNotificationHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using push notifications for updates.

        Sends ``message`` and then waits for the remote agent to deliver the
        final task via webhook into ``result_store``.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages (mutated in place).
            agent_card: The agent card.
            **kwargs: Push notification-specific parameters.

        Returns:
            TaskStateResult with status, result/error, and history.
        """
        config = kwargs.get("config")
        result_store = kwargs.get("result_store")
        polling_timeout = kwargs.get("polling_timeout", 300.0)
        polling_interval = kwargs.get("polling_interval", 2.0)
        agent_branch = kwargs.get("agent_branch")
        task_id = kwargs.get("task_id")
        params = extract_common_params(kwargs)
        # Both config and result_store are mandatory; missing either is a
        # configuration error reported as a failed result, not an exception.
        if config is None:
            error_msg = (
                "PushNotificationConfig is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=params.a2a_agent_name,
                    operation="push_notification",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        if result_store is None:
            error_msg = (
                "PushNotificationResultStore is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=params.a2a_agent_name,
                    operation="push_notification",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        try:
            # Fast path: the initial send may already yield a final result
            # instead of a task id string.
            result_or_task_id = await send_message_and_get_task_id(
                event_stream=client.send_message(message),
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=params.turn_number,
                is_multiturn=params.is_multiturn,
                agent_role=params.agent_role,
                from_task=params.from_task,
                from_agent=params.from_agent,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                context_id=params.context_id,
            )
            if not isinstance(result_or_task_id, str):
                return result_or_task_id
            task_id = result_or_task_id
            crewai_event_bus.emit(
                agent_branch,
                A2APushNotificationRegisteredEvent(
                    task_id=task_id,
                    context_id=params.context_id,
                    callback_url=str(config.url),
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            logger.debug(
                "Push notification callback for task %s configured at %s (via initial request)",
                task_id,
                config.url,
            )
            final_task = await _wait_for_push_result(
                task_id=task_id,
                result_store=result_store,
                timeout=polling_timeout,
                poll_interval=polling_interval,
                agent_branch=agent_branch,
                from_task=params.from_task,
                from_agent=params.from_agent,
                context_id=params.context_id,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
            )
            if final_task is None:
                # _wait_for_push_result already emitted the timeout event.
                return TaskStateResult(
                    status=TaskState.failed,
                    error=f"Push notification timeout after {polling_timeout}s",
                    history=new_messages,
                )
            result = process_task_state(
                a2a_task=final_task,
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=params.turn_number,
                is_multiturn=params.is_multiturn,
                agent_role=params.agent_role,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                from_task=params.from_task,
                from_agent=params.from_agent,
            )
            if result:
                return result
            return TaskStateResult(
                status=TaskState.failed,
                error=f"Unexpected task state: {final_task.status.state}",
                history=new_messages,
            )
        except A2AClientHTTPError as e:
            return _handle_push_error(
                error=e,
                error_msg=f"HTTP Error {e.status_code}: {e!s}",
                error_type="http_error",
                new_messages=new_messages,
                agent_branch=agent_branch,
                params=params,
                task_id=task_id,
                status_code=e.status_code,
            )
        except Exception as e:
            return _handle_push_error(
                error=e,
                error_msg=f"Unexpected error during push notification: {e!s}",
                error_type="unexpected_error",
                new_messages=new_messages,
                agent_branch=agent_branch,
                params=params,
                task_id=task_id,
            )

View File

@@ -1,87 +0,0 @@
"""Webhook signature configuration for push notifications."""
from __future__ import annotations
from enum import Enum
import secrets
from pydantic import BaseModel, Field, SecretStr
class WebhookSignatureMode(str, Enum):
    """Supported signature schemes for webhook push notifications.

    The ``str`` mixin keeps values JSON/Pydantic friendly.
    """

    NONE = "none"
    HMAC_SHA256 = "hmac_sha256"
class WebhookSignatureConfig(BaseModel):
    """Signature-verification settings for A2A push notification webhooks.

    Provides cryptographic integrity verification and replay attack
    protection for incoming webhook payloads.

    Attributes:
        mode: Signature mode (none or hmac_sha256).
        secret: Shared secret for HMAC computation (required for hmac_sha256 mode).
        timestamp_tolerance_seconds: Max allowed age of timestamps for replay protection.
        header_name: HTTP header name for the signature.
        timestamp_header_name: HTTP header name for the timestamp.
    """

    mode: WebhookSignatureMode = Field(
        default=WebhookSignatureMode.NONE,
        description="Signature verification mode",
    )
    secret: SecretStr | None = Field(
        default=None,
        description="Shared secret for HMAC computation",
    )
    timestamp_tolerance_seconds: int = Field(
        default=300,
        ge=0,
        description="Max allowed timestamp age in seconds (5 min default)",
    )
    header_name: str = Field(
        default="X-A2A-Signature",
        description="HTTP header name for the signature",
    )
    timestamp_header_name: str = Field(
        default="X-A2A-Signature-Timestamp",
        description="HTTP header name for the timestamp",
    )

    @classmethod
    def generate_secret(cls, length: int = 32) -> str:
        """Return a cryptographically secure random secret.

        Args:
            length: Number of random bytes to generate (default 32).

        Returns:
            URL-safe base64-encoded secret string.
        """
        return secrets.token_urlsafe(length)

    @classmethod
    def hmac_sha256(
        cls,
        secret: str | SecretStr,
        timestamp_tolerance_seconds: int = 300,
    ) -> WebhookSignatureConfig:
        """Build a config preset for HMAC-SHA256 signing.

        Args:
            secret: Shared secret for HMAC computation.
            timestamp_tolerance_seconds: Max allowed timestamp age in seconds.

        Returns:
            Configured WebhookSignatureConfig for HMAC-SHA256.
        """
        # Accept a bare string for convenience; wrap it so it never leaks
        # via repr/logging.
        wrapped = SecretStr(secret) if isinstance(secret, str) else secret
        return cls(
            mode=WebhookSignatureMode.HMAC_SHA256,
            secret=wrapped,
            timestamp_tolerance_seconds=timestamp_tolerance_seconds,
        )

View File

@@ -1 +0,0 @@
"""Streaming update mechanism module."""

View File

@@ -1,9 +0,0 @@
"""Streaming update mechanism configuration."""
from __future__ import annotations
from pydantic import BaseModel
class StreamingConfig(BaseModel):
    """Configuration for SSE-based task updates."""
    # NOTE: intentionally empty -- SSE streaming currently has no tunables,
    # but the model keeps the update-mechanism config interface uniform.

View File

@@ -1,646 +0,0 @@
"""Streaming (SSE) update mechanism handler."""
from __future__ import annotations
import asyncio
import logging
from typing import Final
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
Task,
TaskArtifactUpdateEvent,
TaskIdParams,
TaskQueryParams,
TaskState,
TaskStatusUpdateEvent,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
ACTIONABLE_STATES,
TERMINAL_STATES,
TaskStateResult,
process_task_state,
)
from crewai.a2a.updates.base import StreamingHandlerKwargs, extract_common_params
from crewai.a2a.updates.streaming.params import (
process_status_update,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AArtifactReceivedEvent,
A2AConnectionErrorEvent,
A2AResponseReceivedEvent,
A2AStreamingChunkEvent,
A2AStreamingStartedEvent,
)
logger = logging.getLogger(__name__)
MAX_RESUBSCRIBE_ATTEMPTS: Final[int] = 3
RESUBSCRIBE_BACKOFF_BASE: Final[float] = 1.0
class StreamingHandler:
    """SSE streaming-based update handler."""

    @staticmethod
    async def _try_recover_from_interruption(  # type: ignore[misc]
        client: Client,
        task_id: str,
        new_messages: list[Message],
        agent_card: AgentCard,
        result_parts: list[str],
        **kwargs: Unpack[StreamingHandlerKwargs],
    ) -> TaskStateResult | None:
        """Attempt to recover from a stream interruption by checking task state.

        If the task completed while we were disconnected, returns the result.
        If the task is still running, attempts to resubscribe and continue.

        Args:
            client: A2A client instance.
            task_id: The task ID to recover.
            new_messages: List of collected messages.
            agent_card: The agent card.
            result_parts: Accumulated result text parts.
            **kwargs: Handler parameters.

        Returns:
            TaskStateResult if recovery succeeded (task finished or resubscribe worked).
            None if recovery not possible (caller should handle failure).

        Note:
            When None is returned, recovery failed and the original exception should
            be handled by the caller. All recovery attempts are logged.
        """
        params = extract_common_params(kwargs)  # type: ignore[arg-type]
        try:
            # First check whether the task resolved while we were disconnected.
            a2a_task: Task = await client.get_task(TaskQueryParams(id=task_id))
            if a2a_task.status.state in TERMINAL_STATES:
                logger.info(
                    "Task completed during stream interruption",
                    extra={"task_id": task_id, "state": str(a2a_task.status.state)},
                )
                return process_task_state(
                    a2a_task=a2a_task,
                    new_messages=new_messages,
                    agent_card=agent_card,
                    turn_number=params.turn_number,
                    is_multiturn=params.is_multiturn,
                    agent_role=params.agent_role,
                    result_parts=result_parts,
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                )
            if a2a_task.status.state in ACTIONABLE_STATES:
                logger.info(
                    "Task in actionable state during stream interruption",
                    extra={"task_id": task_id, "state": str(a2a_task.status.state)},
                )
                return process_task_state(
                    a2a_task=a2a_task,
                    new_messages=new_messages,
                    agent_card=agent_card,
                    turn_number=params.turn_number,
                    is_multiturn=params.is_multiturn,
                    agent_role=params.agent_role,
                    result_parts=result_parts,
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                    is_final=False,
                )
            logger.info(
                "Task still running, attempting resubscribe",
                extra={"task_id": task_id, "state": str(a2a_task.status.state)},
            )
            # Resubscribe with exponential backoff (1s, 2s, 4s, ...).
            for attempt in range(MAX_RESUBSCRIBE_ATTEMPTS):
                try:
                    backoff = RESUBSCRIBE_BACKOFF_BASE * (2**attempt)
                    if attempt > 0:
                        await asyncio.sleep(backoff)
                    event_stream = client.resubscribe(TaskIdParams(id=task_id))
                    async for event in event_stream:
                        if isinstance(event, tuple):
                            resubscribed_task, update = event
                            is_final_update = (
                                process_status_update(update, result_parts)
                                if isinstance(update, TaskStatusUpdateEvent)
                                else False
                            )
                            if isinstance(update, TaskArtifactUpdateEvent):
                                artifact = update.artifact
                                result_parts.extend(
                                    part.root.text
                                    for part in artifact.parts
                                    if part.root.kind == "text"
                                )
                            if (
                                is_final_update
                                or resubscribed_task.status.state
                                in TERMINAL_STATES | ACTIONABLE_STATES
                            ):
                                return process_task_state(
                                    a2a_task=resubscribed_task,
                                    new_messages=new_messages,
                                    agent_card=agent_card,
                                    turn_number=params.turn_number,
                                    is_multiturn=params.is_multiturn,
                                    agent_role=params.agent_role,
                                    result_parts=result_parts,
                                    endpoint=params.endpoint,
                                    a2a_agent_name=params.a2a_agent_name,
                                    from_task=params.from_task,
                                    from_agent=params.from_agent,
                                    is_final=is_final_update,
                                )
                        elif isinstance(event, Message):
                            new_messages.append(event)
                            result_parts.extend(
                                part.root.text
                                for part in event.parts
                                if part.root.kind == "text"
                            )
                    # Stream ended without a final update: take a last snapshot.
                    final_task = await client.get_task(TaskQueryParams(id=task_id))
                    return process_task_state(
                        a2a_task=final_task,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        turn_number=params.turn_number,
                        is_multiturn=params.is_multiturn,
                        agent_role=params.agent_role,
                        result_parts=result_parts,
                        endpoint=params.endpoint,
                        a2a_agent_name=params.a2a_agent_name,
                        from_task=params.from_task,
                        from_agent=params.from_agent,
                    )
                except Exception as resubscribe_error:  # noqa: PERF203
                    logger.warning(
                        "Resubscribe attempt failed",
                        extra={
                            "task_id": task_id,
                            "attempt": attempt + 1,
                            "max_attempts": MAX_RESUBSCRIBE_ATTEMPTS,
                            "error": str(resubscribe_error),
                        },
                    )
                    if attempt == MAX_RESUBSCRIBE_ATTEMPTS - 1:
                        return None
        except Exception as e:
            logger.warning(
                "Failed to recover from stream interruption due to unexpected error",
                extra={
                    "task_id": task_id,
                    "error": str(e),
                    "error_type": type(e).__name__,
                },
                exc_info=True,
            )
            return None
        logger.warning(
            "Recovery exhausted all resubscribe attempts without success",
            extra={"task_id": task_id, "max_attempts": MAX_RESUBSCRIBE_ATTEMPTS},
        )
        return None

    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[StreamingHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using SSE streaming for updates.

        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages (mutated in place).
            agent_card: The agent card.
            **kwargs: Streaming-specific parameters.

        Returns:
            TaskStateResult with status, result/error, and history.
        """
        task_id = kwargs.get("task_id")
        agent_branch = kwargs.get("agent_branch")
        params = extract_common_params(kwargs)
        result_parts: list[str] = []
        final_result: TaskStateResult | None = None
        event_stream = client.send_message(message)
        chunk_index = 0
        # Tracks the server-assigned task id so recovery can resubscribe.
        current_task_id: str | None = task_id
        crewai_event_bus.emit(
            agent_branch,
            A2AStreamingStartedEvent(
                task_id=task_id,
                context_id=params.context_id,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                turn_number=params.turn_number,
                is_multiturn=params.is_multiturn,
                agent_role=params.agent_role,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        try:
            async for event in event_stream:
                # Capture the task id from any (task, update) tuple first so
                # that recovery works even if a later line raises.
                if isinstance(event, tuple):
                    a2a_task, _ = event
                    current_task_id = a2a_task.id
                if isinstance(event, Message):
                    new_messages.append(event)
                    message_context_id = event.context_id or params.context_id
                    for part in event.parts:
                        if part.root.kind == "text":
                            text = part.root.text
                            result_parts.append(text)
                            crewai_event_bus.emit(
                                agent_branch,
                                A2AStreamingChunkEvent(
                                    task_id=event.task_id or task_id,
                                    context_id=message_context_id,
                                    chunk=text,
                                    chunk_index=chunk_index,
                                    endpoint=params.endpoint,
                                    a2a_agent_name=params.a2a_agent_name,
                                    turn_number=params.turn_number,
                                    is_multiturn=params.is_multiturn,
                                    from_task=params.from_task,
                                    from_agent=params.from_agent,
                                ),
                            )
                            chunk_index += 1
                elif isinstance(event, tuple):
                    a2a_task, update = event
                    if isinstance(update, TaskArtifactUpdateEvent):
                        artifact = update.artifact
                        result_parts.extend(
                            part.root.text
                            for part in artifact.parts
                            if part.root.kind == "text"
                        )
                        artifact_size = None
                        if artifact.parts:
                            artifact_size = sum(
                                len(p.root.text.encode())
                                if p.root.kind == "text"
                                else len(getattr(p.root, "data", b""))
                                for p in artifact.parts
                            )
                        effective_context_id = a2a_task.context_id or params.context_id
                        crewai_event_bus.emit(
                            agent_branch,
                            A2AArtifactReceivedEvent(
                                task_id=a2a_task.id,
                                artifact_id=artifact.artifact_id,
                                artifact_name=artifact.name,
                                artifact_description=artifact.description,
                                mime_type=artifact.parts[0].root.kind
                                if artifact.parts
                                else None,
                                size_bytes=artifact_size,
                                append=update.append or False,
                                last_chunk=update.last_chunk or False,
                                endpoint=params.endpoint,
                                a2a_agent_name=params.a2a_agent_name,
                                context_id=effective_context_id,
                                turn_number=params.turn_number,
                                is_multiturn=params.is_multiturn,
                                from_task=params.from_task,
                                from_agent=params.from_agent,
                            ),
                        )
                    is_final_update = (
                        process_status_update(update, result_parts)
                        if isinstance(update, TaskStatusUpdateEvent)
                        else False
                    )
                    # Keep streaming until a final update or a settled state.
                    if (
                        not is_final_update
                        and a2a_task.status.state
                        not in TERMINAL_STATES | ACTIONABLE_STATES
                    ):
                        continue
                    final_result = process_task_state(
                        a2a_task=a2a_task,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        turn_number=params.turn_number,
                        is_multiturn=params.is_multiturn,
                        agent_role=params.agent_role,
                        result_parts=result_parts,
                        endpoint=params.endpoint,
                        a2a_agent_name=params.a2a_agent_name,
                        from_task=params.from_task,
                        from_agent=params.from_agent,
                        is_final=is_final_update,
                    )
                    if final_result:
                        break
        except A2AClientHTTPError as e:
            # Try to recover: the task may have finished server-side.
            if current_task_id:
                logger.info(
                    "Stream interrupted with HTTP error, attempting recovery",
                    extra={
                        "task_id": current_task_id,
                        "error": str(e),
                        "status_code": e.status_code,
                    },
                )
                recovery_kwargs = {k: v for k, v in kwargs.items() if k != "task_id"}
                recovered_result = (
                    await StreamingHandler._try_recover_from_interruption(
                        client=client,
                        task_id=current_task_id,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        result_parts=result_parts,
                        **recovery_kwargs,
                    )
                )
                if recovered_result:
                    logger.info(
                        "Successfully recovered task after HTTP error",
                        extra={
                            "task_id": current_task_id,
                            "status": str(recovered_result.get("status")),
                        },
                    )
                    return recovered_result
                logger.warning(
                    "Failed to recover from HTTP error, returning failure",
                    extra={
                        "task_id": current_task_id,
                        "status_code": e.status_code,
                        "original_error": str(e),
                    },
                )
            error_msg = f"HTTP Error {e.status_code}: {e!s}"
            error_type = "http_error"
            status_code = e.status_code
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=params.context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=str(e),
                    error_type=error_type,
                    status_code=status_code,
                    a2a_agent_name=params.a2a_agent_name,
                    operation="streaming",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=params.turn_number,
                    context_id=params.context_id,
                    is_multiturn=params.is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=params.agent_role,
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        except (asyncio.TimeoutError, asyncio.CancelledError, ConnectionError) as e:
            # Transient transport failures get the same recovery path.
            error_type = type(e).__name__.lower()
            if current_task_id:
                logger.info(
                    f"Stream interrupted with {error_type}, attempting recovery",
                    extra={"task_id": current_task_id, "error": str(e)},
                )
                recovery_kwargs = {k: v for k, v in kwargs.items() if k != "task_id"}
                recovered_result = (
                    await StreamingHandler._try_recover_from_interruption(
                        client=client,
                        task_id=current_task_id,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        result_parts=result_parts,
                        **recovery_kwargs,
                    )
                )
                if recovered_result:
                    logger.info(
                        f"Successfully recovered task after {error_type}",
                        extra={
                            "task_id": current_task_id,
                            "status": str(recovered_result.get("status")),
                        },
                    )
                    return recovered_result
                logger.warning(
                    f"Failed to recover from {error_type}, returning failure",
                    extra={
                        "task_id": current_task_id,
                        "error_type": error_type,
                        "original_error": str(e),
                    },
                )
            error_msg = f"Connection error during streaming: {e!s}"
            status_code = None
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=params.context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=str(e),
                    error_type=error_type,
                    status_code=status_code,
                    a2a_agent_name=params.a2a_agent_name,
                    operation="streaming",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=params.turn_number,
                    context_id=params.context_id,
                    is_multiturn=params.is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=params.agent_role,
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        except Exception as e:
            # No recovery attempt here: unknown errors are reported directly.
            logger.exception(
                "Unexpected error during streaming",
                extra={
                    "task_id": current_task_id,
                    "error_type": type(e).__name__,
                    "endpoint": params.endpoint,
                },
            )
            error_msg = f"Unexpected error during streaming: {type(e).__name__}: {e!s}"
            error_type = "unexpected_error"
            status_code = None
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=params.context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=str(e),
                    error_type=error_type,
                    status_code=status_code,
                    a2a_agent_name=params.a2a_agent_name,
                    operation="streaming",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=params.turn_number,
                    context_id=params.context_id,
                    is_multiturn=params.is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=params.agent_role,
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        finally:
            # Best-effort close of the SSE generator; close failures are
            # reported as events rather than raised.
            aclose = getattr(event_stream, "aclose", None)
            if aclose:
                try:
                    await aclose()
                except Exception as close_error:
                    crewai_event_bus.emit(
                        agent_branch,
                        A2AConnectionErrorEvent(
                            endpoint=params.endpoint,
                            error=str(close_error),
                            error_type="stream_close_error",
                            a2a_agent_name=params.a2a_agent_name,
                            operation="stream_close",
                            context_id=params.context_id,
                            task_id=task_id,
                            from_task=params.from_task,
                            from_agent=params.from_agent,
                        ),
                    )
        if final_result:
            return final_result
        # Stream ended without a final state update: treat collected text
        # as the completed result.
        return TaskStateResult(
            status=TaskState.completed,
            result=" ".join(result_parts) if result_parts else "",
            history=new_messages,
            agent_card=agent_card.model_dump(exclude_none=True),
        )

View File

@@ -1,28 +0,0 @@
"""Common parameter extraction for streaming handlers."""
from __future__ import annotations
from a2a.types import TaskStatusUpdateEvent
def process_status_update(
    update: TaskStatusUpdateEvent,
    result_parts: list[str],
) -> bool:
    """Collect text fragments carried by a status update event.

    Args:
        update: The status update event.
        result_parts: Accumulator for extracted text (mutated in place).

    Returns:
        True if this is a final update, False otherwise.
    """
    status = update.status
    message = status.message if status else None
    if message and message.parts:
        for part in message.parts:
            root = part.root
            if root.kind == "text" and root.text:
                result_parts.append(root.text)
    return update.final

View File

@@ -1 +0,0 @@
"""A2A utility modules for client operations."""

View File

@@ -1,596 +0,0 @@
"""AgentCard utilities for A2A client and server operations."""
from __future__ import annotations
import asyncio
from collections.abc import MutableMapping
import concurrent.futures
import contextvars
from functools import lru_cache
import ssl
import time
from types import MethodType
from typing import TYPE_CHECKING
from a2a.client.errors import A2AClientHTTPError
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
from aiocache import cached # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer # type: ignore[import-untyped]
import httpx
from crewai.a2a.auth.client_schemes import APIKeyAuth, HTTPDigestAuth
from crewai.a2a.auth.utils import (
_auth_store,
configure_auth_client,
retry_on_401,
)
from crewai.a2a.config import A2AServerConfig
from crewai.crew import Crew
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AAgentCardFetchedEvent,
A2AAuthenticationFailedEvent,
A2AConnectionErrorEvent,
)
if TYPE_CHECKING:
from crewai.a2a.auth.client_schemes import ClientAuthScheme
from crewai.agent import Agent
from crewai.task import Task
def _get_tls_verify(auth: ClientAuthScheme | None) -> ssl.SSLContext | bool | str:
"""Get TLS verify parameter from auth scheme.
Args:
auth: Optional authentication scheme with TLS config.
Returns:
SSL context, CA cert path, True for default verification,
or False if verification disabled.
"""
if auth and auth.tls:
return auth.tls.get_httpx_ssl_context()
return True
async def _prepare_auth_headers(
    auth: ClientAuthScheme | None,
    timeout: int,
) -> tuple[MutableMapping[str, str], ssl.SSLContext | bool | str]:
    """Build request headers and the TLS verify setting for a request.

    Args:
        auth: Optional authentication scheme.
        timeout: Request timeout in seconds for the throwaway auth client.

    Returns:
        Tuple of (headers dict, TLS verify setting).
    """
    verify = _get_tls_verify(auth)
    if not auth:
        return {}, verify
    # A short-lived client lets the scheme compute its headers (e.g. token
    # exchange); the caller creates the real request client separately.
    async with httpx.AsyncClient(timeout=timeout, verify=verify) as auth_client:
        if isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            configure_auth_client(auth, auth_client)
        return await auth.apply_auth(auth_client, {}), verify
def _get_server_config(agent: Agent) -> A2AServerConfig | None:
"""Get A2AServerConfig from an agent's a2a configuration.
Args:
agent: The Agent instance to check.
Returns:
A2AServerConfig if present, None otherwise.
"""
if agent.a2a is None:
return None
if isinstance(agent.a2a, A2AServerConfig):
return agent.a2a
if isinstance(agent.a2a, list):
for config in agent.a2a:
if isinstance(config, A2AServerConfig):
return config
return None
def fetch_agent_card(
    endpoint: str,
    auth: ClientAuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
    cache_ttl: int = 300,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint with optional caching.

    Synchronous wrapper around the async implementation. Safe to call both
    from plain sync code and from code already running inside an event loop
    (the coroutine is then driven on a dedicated worker thread).

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional ClientAuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).
        cache_ttl: Cache TTL in seconds (default 300 = 5 minutes).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if use_cache:
        # Fingerprint the auth scheme while excluding transient credential
        # state, so equivalent configurations share one cache entry.
        if auth:
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = _auth_store.compute_key(type(auth).__name__, auth_data)
        else:
            auth_hash = _auth_store.compute_key("none", "")
        _auth_store.set(auth_hash, auth)
        # ttl_hash changes once per cache_ttl window, expiring lru_cache hits.
        ttl_hash = int(time.time() // cache_ttl)
        return _fetch_agent_card_cached(endpoint, auth_hash, timeout, ttl_hash)
    # Bug fix: forward use_cache=False explicitly. afetch_agent_card defaults
    # to use_cache=True, so omitting it silently re-enabled the async cache
    # even when the caller asked for a fresh fetch.
    coro = afetch_agent_card(
        endpoint=endpoint, auth=auth, timeout=timeout, use_cache=False
    )
    try:
        asyncio.get_running_loop()
        has_running_loop = True
    except RuntimeError:
        has_running_loop = False
    if has_running_loop:
        # asyncio.run() cannot nest inside a running loop: drive the coroutine
        # on a helper thread, preserving contextvars for event-bus scoping.
        ctx = contextvars.copy_context()
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(ctx.run, asyncio.run, coro).result()
    return asyncio.run(coro)
async def afetch_agent_card(
    endpoint: str,
    auth: ClientAuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint asynchronously.

    Native async implementation. Use this when running in an async context.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional ClientAuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if use_cache:
        # Fingerprint the auth scheme without transient credential fields so
        # equivalent configurations share a cache entry and tokens don't
        # leak into the key material.
        if auth:
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = _auth_store.compute_key(type(auth).__name__, auth_data)
        else:
            auth_hash = _auth_store.compute_key("none", "")
        # Store the live scheme so the cached fetcher can rehydrate it by hash
        # (the cache key itself must stay hashable/serializable).
        _auth_store.set(auth_hash, auth)
        agent_card: AgentCard = await _afetch_agent_card_cached(
            endpoint, auth_hash, timeout
        )
        return agent_card
    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
@lru_cache()
def _fetch_agent_card_cached(
    endpoint: str,
    auth_hash: str,
    timeout: int,
    _ttl_hash: int,
) -> AgentCard:
    """Cached sync version of fetch_agent_card.

    ``_ttl_hash`` is deliberately unused in the body: it changes once per TTL
    window, which rotates the lru_cache key and expires stale entries.
    """
    auth = _auth_store.get(auth_hash)
    coro = _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
    try:
        asyncio.get_running_loop()
        has_running_loop = True
    except RuntimeError:
        has_running_loop = False
    if has_running_loop:
        # Cannot nest asyncio.run() inside a running loop; drive the
        # coroutine on a helper thread with the current contextvars.
        ctx = contextvars.copy_context()
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(ctx.run, asyncio.run, coro).result()
    return asyncio.run(coro)
@cached(ttl=300, serializer=PickleSerializer())  # type: ignore[untyped-decorator]
async def _afetch_agent_card_cached(
    endpoint: str,
    auth_hash: str,
    timeout: int,
) -> AgentCard:
    """Cached async implementation of AgentCard fetching.

    The auth scheme is looked up by hash rather than passed directly so the
    aiocache key stays hashable and pickle-serializable.
    """
    auth = _auth_store.get(auth_hash)
    return await _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
async def _afetch_agent_card_impl(
    endpoint: str,
    auth: ClientAuthScheme | None,
    timeout: int,
) -> AgentCard:
    """Internal async implementation of AgentCard fetching.

    Resolves the card URL, applies authentication, fetches and validates the
    card, and emits telemetry events on both success and failure paths.
    """
    start_time = time.perf_counter()
    # Split the endpoint into base URL + card path; a bare endpoint falls
    # back to the well-known agent-card location.
    if "/.well-known/agent-card.json" in endpoint:
        base_url = endpoint.replace("/.well-known/agent-card.json", "")
        agent_card_path = "/.well-known/agent-card.json"
    else:
        url_parts = endpoint.split("/", 3)
        base_url = f"{url_parts[0]}//{url_parts[2]}"
        agent_card_path = (
            f"/{url_parts[3]}"
            if len(url_parts) > 3 and url_parts[3]
            else "/.well-known/agent-card.json"
        )
    headers, verify = await _prepare_auth_headers(auth, timeout)
    async with httpx.AsyncClient(
        timeout=timeout, headers=headers, verify=verify
    ) as temp_client:
        if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            configure_auth_client(auth, temp_client)
        agent_card_url = f"{base_url}{agent_card_path}"

        async def _fetch_agent_card_request() -> httpx.Response:
            # Closure so retry_on_401 can re-issue the request after the
            # auth scheme refreshes its credentials.
            return await temp_client.get(agent_card_url)

        try:
            response = await retry_on_401(
                request_func=_fetch_agent_card_request,
                auth_scheme=auth,
                client=temp_client,
                headers=temp_client.headers,
                max_retries=2,
            )
            response.raise_for_status()
            agent_card = AgentCard.model_validate(response.json())
            fetch_time_ms = (time.perf_counter() - start_time) * 1000
            agent_card_dict = agent_card.model_dump(exclude_none=True)
            crewai_event_bus.emit(
                None,
                A2AAgentCardFetchedEvent(
                    endpoint=endpoint,
                    a2a_agent_name=agent_card.name,
                    agent_card=agent_card_dict,
                    protocol_version=agent_card.protocol_version,
                    provider=agent_card_dict.get("provider"),
                    cached=False,
                    fetch_time_ms=fetch_time_ms,
                ),
            )
            return agent_card
        except httpx.HTTPStatusError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            # Truncate bodies so emitted events stay small.
            response_body = e.response.text[:1000] if e.response.text else None
            if e.response.status_code == 401:
                # 401 gets a dedicated event and a translated exception type.
                error_details = ["Authentication failed"]
                www_auth = e.response.headers.get("WWW-Authenticate")
                if www_auth:
                    error_details.append(f"WWW-Authenticate: {www_auth}")
                if not auth:
                    error_details.append("No auth scheme provided")
                msg = " | ".join(error_details)
                auth_type = type(auth).__name__ if auth else None
                crewai_event_bus.emit(
                    None,
                    A2AAuthenticationFailedEvent(
                        endpoint=endpoint,
                        auth_type=auth_type,
                        error=msg,
                        status_code=401,
                        metadata={
                            "elapsed_ms": elapsed_ms,
                            "response_body": response_body,
                            "www_authenticate": www_auth,
                            "request_url": str(e.request.url),
                        },
                    ),
                )
                raise A2AClientHTTPError(401, msg) from e
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.response.status_code,
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "response_body": response_body,
                        "request_url": str(e.request.url),
                    },
                ),
            )
            raise
        except httpx.TimeoutException as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="timeout",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "timeout_config": timeout,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise
        except httpx.ConnectError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="connection_error",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise
        except httpx.RequestError as e:
            # Catch-all for remaining transport errors; must come after the
            # more specific httpx subclasses above.
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="request_error",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise
def _task_to_skill(task: Task) -> AgentSkill:
    """Convert a CrewAI Task to an A2A AgentSkill.

    Args:
        task: The CrewAI Task to convert.

    Returns:
        AgentSkill representing the task's capability.
    """
    # Fall back to a truncated description when the task has no name.
    skill_name = task.name or task.description[:50]
    role_tags = [task.agent.role.lower().replace(" ", "-")] if task.agent else []
    return AgentSkill(
        id=skill_name.lower().replace(" ", "_"),
        name=skill_name,
        description=task.description,
        tags=role_tags,
        examples=[task.expected_output] if task.expected_output else None,
    )
def _tool_to_skill(tool_name: str, tool_description: str) -> AgentSkill:
    """Convert an Agent's tool to an A2A AgentSkill.

    Args:
        tool_name: Name of the tool.
        tool_description: Description of what the tool does.

    Returns:
        AgentSkill representing the tool's capability.
    """
    normalized = tool_name.lower()
    return AgentSkill(
        id=normalized.replace(" ", "_"),
        name=tool_name,
        description=tool_description,
        tags=[normalized.replace(" ", "-")],
    )
def _crew_to_agent_card(crew: Crew, url: str) -> AgentCard:
    """Generate an A2A AgentCard from a Crew instance.

    Args:
        crew: The Crew instance to generate a card for.
        url: The base URL where this crew will be exposed.

    Returns:
        AgentCard describing the crew's capabilities.
    """
    crew_name = getattr(crew, "name", None) or crew.__class__.__name__
    # Prefer an explicit description; otherwise summarize the member roles.
    crew_description = getattr(crew, "description", None)
    if not crew_description:
        roles = ", ".join(agent.role for agent in crew.agents)
        crew_description = f"A crew of {len(crew.agents)} agents: {roles}"
    return AgentCard(
        name=crew_name,
        description=crew_description,
        url=url,
        version="1.0.0",
        capabilities=AgentCapabilities(
            streaming=True,
            push_notifications=True,
        ),
        default_input_modes=["text/plain", "application/json"],
        default_output_modes=["text/plain", "application/json"],
        skills=[_task_to_skill(task) for task in crew.tasks],
    )
def _agent_to_agent_card(agent: Agent, url: str) -> AgentCard:
    """Generate an A2A AgentCard from an Agent instance.

    Uses A2AServerConfig values when available, falling back to agent properties.
    If signing_config is provided, the card will be signed with JWS.

    Args:
        agent: The Agent instance to generate a card for.
        url: The base URL where this agent will be exposed.

    Returns:
        AgentCard describing the agent's capabilities.
    """
    # Local import avoids a module-level cycle with the signing utilities.
    from crewai.a2a.utils.agent_card_signing import sign_agent_card

    # An empty default config makes all the fallbacks below uniform.
    server_config = _get_server_config(agent) or A2AServerConfig()
    name = server_config.name or agent.role
    description_parts = [agent.goal]
    if agent.backstory:
        description_parts.append(agent.backstory)
    description = server_config.description or " ".join(description_parts)
    skills: list[AgentSkill] = (
        server_config.skills.copy() if server_config.skills else []
    )
    # Skill fallback chain: configured skills > tool-derived skills > one
    # skill synthesized from the agent's role and goal.
    if not skills:
        if agent.tools:
            for tool in agent.tools:
                tool_name = getattr(tool, "name", None) or tool.__class__.__name__
                tool_desc = getattr(tool, "description", None) or f"Tool: {tool_name}"
                skills.append(_tool_to_skill(tool_name, tool_desc))
        if not skills:
            skills.append(
                AgentSkill(
                    id=agent.role.lower().replace(" ", "_"),
                    name=agent.role,
                    description=agent.goal,
                    tags=[agent.role.lower().replace(" ", "-")],
                )
            )
    capabilities = server_config.capabilities
    if server_config.server_extensions:
        from crewai.a2a.extensions.server import ServerExtensionRegistry

        # Merge extension declarations into the capabilities, keeping any
        # already present and de-duplicating by URI.
        registry = ServerExtensionRegistry(server_config.server_extensions)
        ext_list = registry.get_agent_extensions()
        existing_exts = list(capabilities.extensions) if capabilities.extensions else []
        existing_uris = {e.uri for e in existing_exts}
        for ext in ext_list:
            if ext.uri not in existing_uris:
                existing_exts.append(ext)
        capabilities = capabilities.model_copy(update={"extensions": existing_exts})
    card = AgentCard(
        name=name,
        description=description,
        url=server_config.url or url,
        version=server_config.version,
        capabilities=capabilities,
        default_input_modes=server_config.default_input_modes,
        default_output_modes=server_config.default_output_modes,
        skills=skills,
        preferred_transport=server_config.transport.preferred,
        protocol_version=server_config.protocol_version,
        provider=server_config.provider,
        documentation_url=server_config.documentation_url,
        icon_url=server_config.icon_url,
        additional_interfaces=server_config.additional_interfaces,
        security=server_config.security,
        security_schemes=server_config.security_schemes,
        supports_authenticated_extended_card=server_config.supports_authenticated_extended_card,
    )
    # Signing config wins over pre-computed signatures; either way the card
    # is copied rather than mutated (pydantic models are treated as frozen).
    if server_config.signing_config:
        signature = sign_agent_card(
            card,
            private_key=server_config.signing_config.get_private_key(),
            key_id=server_config.signing_config.key_id,
            algorithm=server_config.signing_config.algorithm,
        )
        card = card.model_copy(update={"signatures": [signature]})
    elif server_config.signatures:
        card = card.model_copy(update={"signatures": server_config.signatures})
    return card
def inject_a2a_server_methods(agent: Agent) -> None:
    """Inject A2A server methods onto an Agent instance.

    Adds a `to_agent_card(url: str) -> AgentCard` method to the agent
    that generates an A2A-compliant AgentCard.

    Only injects if the agent has an A2AServerConfig.

    Args:
        agent: The Agent instance to inject methods onto.
    """
    if _get_server_config(agent) is None:
        return

    def _to_agent_card(self: Agent, url: str) -> AgentCard:
        return _agent_to_agent_card(self, url)

    # object.__setattr__ bypasses the model's attribute validation;
    # MethodType binds the function to this specific agent instance.
    object.__setattr__(agent, "to_agent_card", MethodType(_to_agent_card, agent))

View File

@@ -1,236 +0,0 @@
"""AgentCard JWS signing utilities.
This module provides functions for signing and verifying AgentCards using
JSON Web Signatures (JWS) as per RFC 7515. Signed agent cards allow clients
to verify the authenticity and integrity of agent card information.
Example:
>>> from crewai.a2a.utils.agent_card_signing import sign_agent_card
>>> signature = sign_agent_card(agent_card, private_key_pem, key_id="key-1")
>>> card_with_sig = card.model_copy(update={"signatures": [signature]})
"""
from __future__ import annotations
import base64
import json
import logging
from typing import Any, Literal
from a2a.types import AgentCard, AgentCardSignature
import jwt
from pydantic import SecretStr
logger = logging.getLogger(__name__)

# Asymmetric JWS algorithms accepted for AgentCard signing (RFC 7518);
# symmetric (HS*) algorithms are deliberately excluded.
SigningAlgorithm = Literal[
    "RS256", "RS384", "RS512", "ES256", "ES384", "ES512", "PS256", "PS384", "PS512"
]
def _normalize_private_key(private_key: str | bytes | SecretStr) -> bytes:
    """Coerce a PEM private key given as str, bytes, or SecretStr to bytes.

    Args:
        private_key: PEM-encoded private key in any accepted form.

    Returns:
        Private key as bytes.
    """
    if isinstance(private_key, SecretStr):
        private_key = private_key.get_secret_value()
    return private_key.encode() if isinstance(private_key, str) else private_key
def _serialize_agent_card(agent_card: AgentCard) -> str:
    """Render the card as canonical JSON for signing.

    The signatures field is excluded to avoid a circular reference; sorted
    keys and compact separators make the output deterministic.

    Args:
        agent_card: The AgentCard to serialize.

    Returns:
        Canonical JSON string representation.
    """
    payload = agent_card.model_dump(exclude={"signatures"}, exclude_none=True)
    return json.dumps(payload, sort_keys=True, separators=(",", ":"))
def _base64url_encode(data: bytes | str) -> str:
"""Encode data to URL-safe base64 without padding.
Args:
data: Data to encode.
Returns:
URL-safe base64 encoded string without padding.
"""
if isinstance(data, str):
data = data.encode()
return base64.urlsafe_b64encode(data).rstrip(b"=").decode("ascii")
def sign_agent_card(
    agent_card: AgentCard,
    private_key: str | bytes | SecretStr,
    key_id: str | None = None,
    algorithm: SigningAlgorithm = "RS256",
) -> AgentCardSignature:
    """Sign an AgentCard using JWS (RFC 7515).

    Creates a detached JWS signature for the AgentCard. The signature covers
    all fields except the signatures field itself.

    Args:
        agent_card: The AgentCard to sign.
        private_key: PEM-encoded private key (RSA, EC, or RSA-PSS).
        key_id: Optional key identifier for the JWS header (kid claim).
        algorithm: Signing algorithm (RS256, ES256, PS256, etc.).

    Returns:
        AgentCardSignature with protected header and signature.

    Raises:
        jwt.exceptions.InvalidKeyError: If the private key is invalid.
        ValueError: If the algorithm is not supported for the key type.

    Example:
        >>> signature = sign_agent_card(
        ...     agent_card,
        ...     private_key_pem="-----BEGIN PRIVATE KEY-----...",
        ...     key_id="my-key-id",
        ... )
    """
    key_bytes = _normalize_private_key(private_key)
    payload = _serialize_agent_card(agent_card)
    protected_header: dict[str, Any] = {"typ": "JWS"}
    if key_id:
        protected_header["kid"] = key_id
    # jwt.api_jws produces a compact JWS: <protected>.<payload>.<signature>
    jws_token = jwt.api_jws.encode(
        payload.encode(),
        key_bytes,
        algorithm=algorithm,
        headers=protected_header,
    )
    # Keep only the protected header and signature segments — the payload is
    # the card itself, making this a detached signature.
    parts = jws_token.split(".")
    protected_b64 = parts[0]
    signature_b64 = parts[2]
    header: dict[str, Any] | None = None
    if key_id:
        header = {"kid": key_id}
    return AgentCardSignature(
        protected=protected_b64,
        signature=signature_b64,
        header=header,
    )
def verify_agent_card_signature(
    agent_card: AgentCard,
    signature: AgentCardSignature,
    public_key: str | bytes,
    algorithms: list[str] | None = None,
) -> bool:
    """Verify an AgentCard JWS signature.

    Validates that the signature was created with the corresponding private key
    and that the AgentCard content has not been modified.

    Args:
        agent_card: The AgentCard to verify.
        signature: The AgentCardSignature to validate.
        public_key: PEM-encoded public key (RSA, EC, or RSA-PSS).
        algorithms: List of allowed algorithms. Defaults to common asymmetric algorithms.

    Returns:
        True if signature is valid, False otherwise.

    Example:
        >>> is_valid = verify_agent_card_signature(
        ...     agent_card, signature, public_key_pem="-----BEGIN PUBLIC KEY-----..."
        ... )
    """
    if algorithms is None:
        # Asymmetric-only default list; mirrors SigningAlgorithm.
        algorithms = [
            "RS256",
            "RS384",
            "RS512",
            "ES256",
            "ES384",
            "ES512",
            "PS256",
            "PS384",
            "PS512",
        ]
    if isinstance(public_key, str):
        public_key = public_key.encode()
    # Re-serialize the card and reattach it as the payload segment, turning
    # the detached signature back into a full compact JWS for verification.
    payload = _serialize_agent_card(agent_card)
    payload_b64 = _base64url_encode(payload)
    jws_token = f"{signature.protected}.{payload_b64}.{signature.signature}"
    try:
        jwt.api_jws.decode(
            jws_token,
            public_key,
            algorithms=algorithms,
        )
        return True
    except jwt.InvalidSignatureError:
        logger.debug(
            "AgentCard signature verification failed",
            extra={"reason": "invalid_signature"},
        )
        return False
    except jwt.DecodeError as e:
        logger.debug(
            "AgentCard signature verification failed",
            extra={"reason": "decode_error", "error": str(e)},
        )
        return False
    except jwt.InvalidAlgorithmError as e:
        logger.debug(
            "AgentCard signature verification failed",
            extra={"reason": "algorithm_error", "error": str(e)},
        )
        return False
def get_key_id_from_signature(signature: AgentCardSignature) -> str | None:
    """Extract the key ID (kid) from an AgentCardSignature.

    The unprotected header is consulted first, then the base64url-decoded
    protected header.

    Args:
        signature: The AgentCardSignature to extract from.

    Returns:
        The key ID if present, None otherwise.
    """
    unprotected = signature.header
    if unprotected and "kid" in unprotected:
        kid: str = unprotected["kid"]
        return kid
    try:
        token = signature.protected
        # Restore the base64 padding stripped by base64url encoding.
        remainder = len(token) % 4
        if remainder:
            token += "=" * (4 - remainder)
        decoded_header: dict[str, Any] = json.loads(
            base64.urlsafe_b64decode(token).decode()
        )
        return decoded_header.get("kid")
    except (ValueError, json.JSONDecodeError):
        return None

View File

@@ -1,344 +0,0 @@
"""Content type negotiation for A2A protocol.
This module handles negotiation of input/output MIME types between A2A clients
and servers based on AgentCard capabilities.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Annotated, Final, Literal, cast
from a2a.types import Part
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AContentTypeNegotiatedEvent
if TYPE_CHECKING:
from a2a.types import AgentCard, AgentSkill
# Canonical MIME type constants used throughout content negotiation.
TEXT_PLAIN: Literal["text/plain"] = "text/plain"
APPLICATION_JSON: Literal["application/json"] = "application/json"
IMAGE_PNG: Literal["image/png"] = "image/png"
IMAGE_JPEG: Literal["image/jpeg"] = "image/jpeg"
IMAGE_WILDCARD: Literal["image/*"] = "image/*"
APPLICATION_PDF: Literal["application/pdf"] = "application/pdf"
APPLICATION_OCTET_STREAM: Literal["application/octet-stream"] = (
    "application/octet-stream"
)
# Specialized JSON MIME type carried by a2ui DataParts.
APPLICATION_A2UI_JSON: Literal["application/json+a2ui"] = "application/json+a2ui"
# Modes assumed for clients that do not declare their own capabilities.
DEFAULT_CLIENT_INPUT_MODES: Final[list[Literal["text/plain", "application/json"]]] = [
    TEXT_PLAIN,
    APPLICATION_JSON,
]
DEFAULT_CLIENT_OUTPUT_MODES: Final[list[Literal["text/plain", "application/json"]]] = [
    TEXT_PLAIN,
    APPLICATION_JSON,
]
@dataclass
class NegotiatedContentTypes:
    """Result of content type negotiation."""

    # MIME types the client may send, in client preference order.
    input_modes: Annotated[list[str], "Negotiated input MIME types the client can send"]
    # MIME types the server will produce, in client preference order.
    output_modes: Annotated[
        list[str], "Negotiated output MIME types the server will produce"
    ]
    # The server-side modes the negotiation was performed against
    # (skill-specific when a skill matched, otherwise the card defaults).
    effective_input_modes: Annotated[list[str], "Server's effective input modes"]
    effective_output_modes: Annotated[list[str], "Server's effective output modes"]
    # Set only when the negotiation resolved against a specific skill.
    skill_name: Annotated[
        str | None, "Skill name if negotiation was skill-specific"
    ] = None
class ContentTypeNegotiationError(Exception):
    """Raised when no compatible content types can be negotiated."""

    def __init__(
        self,
        client_input_modes: list[str],
        client_output_modes: list[str],
        server_input_modes: list[str],
        server_output_modes: list[str],
        direction: str = "both",
        message: str | None = None,
    ) -> None:
        """Record the negotiation context and build a direction-specific message.

        Args:
            client_input_modes: MIME types the client can send.
            client_output_modes: MIME types the client accepts.
            server_input_modes: MIME types the server accepts.
            server_output_modes: MIME types the server produces.
            direction: Which direction failed ("input", "output", or "both").
            message: Explicit message overriding the generated one.
        """
        self.client_input_modes = client_input_modes
        self.client_output_modes = client_output_modes
        self.server_input_modes = server_input_modes
        self.server_output_modes = server_output_modes
        self.direction = direction
        if message is None:
            directional_messages = {
                "input": (
                    f"No compatible input content types. "
                    f"Client supports: {client_input_modes}, "
                    f"Server accepts: {server_input_modes}"
                ),
                "output": (
                    f"No compatible output content types. "
                    f"Client accepts: {client_output_modes}, "
                    f"Server produces: {server_output_modes}"
                ),
            }
            message = directional_messages.get(
                direction,
                f"No compatible content types. "
                f"Input - Client: {client_input_modes}, Server: {server_input_modes}. "
                f"Output - Client: {client_output_modes}, Server: {server_output_modes}",
            )
        super().__init__(message)
def _normalize_mime_type(mime_type: str) -> str:
"""Normalize MIME type for comparison (lowercase, strip whitespace)."""
return mime_type.lower().strip()
def _mime_types_compatible(client_type: str, server_type: str) -> bool:
"""Check if two MIME types are compatible.
Handles wildcards like image/* matching image/png.
"""
client_normalized = _normalize_mime_type(client_type)
server_normalized = _normalize_mime_type(server_type)
if client_normalized == server_normalized:
return True
if "*" in client_normalized or "*" in server_normalized:
client_parts = client_normalized.split("/")
server_parts = server_normalized.split("/")
if len(client_parts) == 2 and len(server_parts) == 2:
type_match = (
client_parts[0] == server_parts[0]
or client_parts[0] == "*"
or server_parts[0] == "*"
)
subtype_match = (
client_parts[1] == server_parts[1]
or client_parts[1] == "*"
or server_parts[1] == "*"
)
return type_match and subtype_match
return False
def _find_compatible_modes(
    client_modes: list[str], server_modes: list[str]
) -> list[str]:
    """Find compatible MIME types between client and server.

    Returns modes in client preference order.
    """
    compatible = []
    for client_mode in client_modes:
        for server_mode in server_modes:
            if _mime_types_compatible(client_mode, server_mode):
                if "*" in client_mode and "*" not in server_mode:
                    # A wildcard client mode collects every concrete server
                    # mode it matches, so keep scanning the server list.
                    if server_mode not in compatible:
                        compatible.append(server_mode)
                else:
                    # A concrete client mode is satisfied by its first
                    # match; stop scanning server modes for it.
                    if client_mode not in compatible:
                        compatible.append(client_mode)
                    break
    return compatible
def _get_effective_modes(
agent_card: AgentCard,
skill_name: str | None = None,
) -> tuple[list[str], list[str], AgentSkill | None]:
"""Get effective input/output modes from agent card.
If skill_name is provided and the skill has custom modes, those are used.
Otherwise, falls back to agent card defaults.
"""
skill: AgentSkill | None = None
if skill_name and agent_card.skills:
for s in agent_card.skills:
if s.name == skill_name or s.id == skill_name:
skill = s
break
if skill:
input_modes = (
skill.input_modes if skill.input_modes else agent_card.default_input_modes
)
output_modes = (
skill.output_modes
if skill.output_modes
else agent_card.default_output_modes
)
else:
input_modes = agent_card.default_input_modes
output_modes = agent_card.default_output_modes
return input_modes, output_modes, skill
def negotiate_content_types(
    agent_card: AgentCard,
    client_input_modes: list[str] | None = None,
    client_output_modes: list[str] | None = None,
    skill_name: str | None = None,
    emit_event: bool = True,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    strict: bool = False,
) -> NegotiatedContentTypes:
    """Negotiate content types between client and server.

    Args:
        agent_card: The remote agent's card with capability info.
        client_input_modes: MIME types the client can send. Defaults to text/plain and application/json.
        client_output_modes: MIME types the client can accept. Defaults to text/plain and application/json.
        skill_name: Optional skill to use for mode lookup.
        emit_event: Whether to emit a content type negotiation event.
        endpoint: Agent endpoint (for event metadata).
        a2a_agent_name: Agent name (for event metadata).
        strict: If True, raises error when no compatible types found.
            If False, returns empty lists for incompatible directions.

    Returns:
        NegotiatedContentTypes with compatible input and output modes.

    Raises:
        ContentTypeNegotiationError: If strict=True and no compatible types found.
    """
    # Copies keep the module-level default lists from being mutated by callers.
    if client_input_modes is None:
        client_input_modes = cast(list[str], DEFAULT_CLIENT_INPUT_MODES.copy())
    if client_output_modes is None:
        client_output_modes = cast(list[str], DEFAULT_CLIENT_OUTPUT_MODES.copy())
    server_input_modes, server_output_modes, skill = _get_effective_modes(
        agent_card, skill_name
    )
    compatible_input = _find_compatible_modes(client_input_modes, server_input_modes)
    compatible_output = _find_compatible_modes(client_output_modes, server_output_modes)
    if strict:
        # Report the most specific failure: both directions first, then
        # input-only, then output-only.
        if not compatible_input and not compatible_output:
            raise ContentTypeNegotiationError(
                client_input_modes=client_input_modes,
                client_output_modes=client_output_modes,
                server_input_modes=server_input_modes,
                server_output_modes=server_output_modes,
            )
        if not compatible_input:
            raise ContentTypeNegotiationError(
                client_input_modes=client_input_modes,
                client_output_modes=client_output_modes,
                server_input_modes=server_input_modes,
                server_output_modes=server_output_modes,
                direction="input",
            )
        if not compatible_output:
            raise ContentTypeNegotiationError(
                client_input_modes=client_input_modes,
                client_output_modes=client_output_modes,
                server_input_modes=server_input_modes,
                server_output_modes=server_output_modes,
                direction="output",
            )
    result = NegotiatedContentTypes(
        input_modes=compatible_input,
        output_modes=compatible_output,
        effective_input_modes=server_input_modes,
        effective_output_modes=server_output_modes,
        skill_name=skill.name if skill else None,
    )
    if emit_event:
        crewai_event_bus.emit(
            None,
            A2AContentTypeNegotiatedEvent(
                endpoint=endpoint or agent_card.url,
                a2a_agent_name=a2a_agent_name or agent_card.name,
                skill_name=skill_name,
                client_input_modes=client_input_modes,
                client_output_modes=client_output_modes,
                server_input_modes=server_input_modes,
                server_output_modes=server_output_modes,
                negotiated_input_modes=compatible_input,
                negotiated_output_modes=compatible_output,
                negotiation_success=bool(compatible_input and compatible_output),
            ),
        )
    return result
def validate_content_type(
    content_type: str,
    allowed_modes: list[str],
) -> bool:
    """Validate that a content type is allowed by a list of modes.

    Args:
        content_type: The MIME type to validate.
        allowed_modes: List of allowed MIME types (may include wildcards).

    Returns:
        True if content_type is compatible with any allowed mode.
    """
    return any(
        _mime_types_compatible(content_type, mode) for mode in allowed_modes
    )
def get_part_content_type(part: Part) -> str:
    """Extract the MIME type carried by an A2A Part.

    Args:
        part: A Part object containing TextPart, DataPart, or FilePart.

    Returns:
        The MIME type string for this part.
    """
    root = part.root
    kind = root.kind
    if kind == "text":
        return TEXT_PLAIN
    if kind == "data":
        declared = (root.metadata or {}).get("mimeType", "")
        # a2ui payloads keep their specialized MIME type; every other data
        # part is reported as plain JSON.
        if declared == APPLICATION_A2UI_JSON:
            return APPLICATION_A2UI_JSON
        return APPLICATION_JSON
    if kind == "file":
        return root.file.mime_type or APPLICATION_OCTET_STREAM
    return APPLICATION_OCTET_STREAM
def validate_message_parts(
    parts: list[Part],
    allowed_modes: list[str],
) -> list[str]:
    """Collect the disallowed content types present in a message.

    Args:
        parts: List of Parts from the incoming message.
        allowed_modes: List of allowed MIME types (from default_input_modes).

    Returns:
        List of distinct invalid content types found (empty if all valid).
    """
    invalid: list[str] = []
    for part in parts:
        mime = get_part_content_type(part)
        if validate_content_type(mime, allowed_modes):
            continue
        if mime not in invalid:
            invalid.append(mime)
    return invalid

View File

@@ -1,970 +0,0 @@
"""A2A delegation utilities for executing tasks on remote agents."""
from __future__ import annotations
import asyncio
import base64
from collections.abc import AsyncIterator, Callable, MutableMapping
import concurrent.futures
from contextlib import asynccontextmanager
import contextvars
import logging
from typing import TYPE_CHECKING, Any, Final, Literal
import uuid
from a2a.client import Client, ClientConfig, ClientFactory
from a2a.types import (
AgentCard,
FilePart,
FileWithBytes,
Message,
Part,
PushNotificationConfig as A2APushNotificationConfig,
Role,
TextPart,
)
import httpx
from pydantic import BaseModel
from crewai.a2a.auth.client_schemes import APIKeyAuth, HTTPDigestAuth
from crewai.a2a.auth.utils import (
_auth_store,
configure_auth_client,
validate_auth_against_agent_card,
)
from crewai.a2a.config import ClientTransportConfig, GRPCClientConfig
from crewai.a2a.extensions.registry import (
ExtensionsMiddleware,
validate_required_extensions,
)
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.types import (
HANDLER_REGISTRY,
HandlerType,
PartsDict,
PartsMetadataDict,
TransportType,
)
from crewai.a2a.updates import (
PollingConfig,
PushNotificationConfig,
StreamingHandler,
UpdateConfig,
)
from crewai.a2a.utils.agent_card import (
_afetch_agent_card_cached,
_get_tls_verify,
_prepare_auth_headers,
)
from crewai.a2a.utils.content_type import (
DEFAULT_CLIENT_OUTPUT_MODES,
negotiate_content_types,
)
from crewai.a2a.utils.transport import (
NegotiatedTransport,
TransportNegotiationError,
negotiate_transport,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConversationStartedEvent,
A2ADelegationCompletedEvent,
A2ADelegationStartedEvent,
A2AMessageSentEvent,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from a2a.types import Message
from crewai.a2a.auth.client_schemes import ClientAuthScheme
_DEFAULT_TRANSPORT: Final[TransportType] = "JSONRPC"
def _create_file_parts(input_files: dict[str, Any] | None) -> list[Part]:
"""Convert FileInput dictionary to FilePart objects.
Args:
input_files: Dictionary mapping names to FileInput objects.
Returns:
List of Part objects containing FilePart data.
"""
if not input_files:
return []
try:
import crewai_files # noqa: F401
except ImportError:
logger.debug("crewai_files not installed, skipping file parts")
return []
parts: list[Part] = []
for name, file_input in input_files.items():
content_bytes = file_input.read()
content_base64 = base64.b64encode(content_bytes).decode()
file_with_bytes = FileWithBytes(
bytes=content_base64,
mimeType=file_input.content_type,
name=file_input.filename or name,
)
parts.append(Part(root=FilePart(file=file_with_bytes)))
return parts
def get_handler(config: UpdateConfig | None) -> HandlerType:
    """Resolve the update-handler class for a configuration object.

    Args:
        config: Update mechanism configuration, or None.

    Returns:
        The handler class registered for the config's type. Falls back to
        StreamingHandler when no config is given or the type is not
        registered.
    """
    fallback = StreamingHandler
    if config is None:
        return fallback
    return HANDLER_REGISTRY.get(type(config), fallback)
def execute_a2a_delegation(
    endpoint: str,
    auth: ClientAuthScheme | None,
    timeout: int,
    task_description: str,
    context: str | None = None,
    context_id: str | None = None,
    task_id: str | None = None,
    reference_task_ids: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    extensions: dict[str, Any] | None = None,
    conversation_history: list[Message] | None = None,
    agent_id: str | None = None,
    agent_role: Role | None = None,
    agent_branch: Any | None = None,
    response_model: type[BaseModel] | None = None,
    turn_number: int | None = None,
    updates: UpdateConfig | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
    client_extensions: list[str] | None = None,
    transport: ClientTransportConfig | None = None,
    accepted_output_modes: list[str] | None = None,
    input_files: dict[str, Any] | None = None,
) -> TaskStateResult:
    """Synchronously delegate a task to a remote A2A agent.

    WARNING: This function blocks the calling thread by running a new event
    loop (directly, or on a worker thread when a loop is already running).
    Prefer ``await aexecute_a2a_delegation()`` in async contexts for better
    performance and resource efficiency. This wrapper exists only for
    synchronous code paths.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional ClientAuthScheme for authentication.
        timeout: Request timeout in seconds.
        task_description: The task to delegate.
        context: Optional context information.
        context_id: Context ID for correlating messages/tasks.
        task_id: Specific task identifier.
        reference_task_ids: List of related task IDs.
        metadata: Additional metadata.
        extensions: Protocol extensions for custom fields.
        conversation_history: Previous Message objects from conversation.
        agent_id: Agent identifier for logging.
        agent_role: Role of the CrewAI agent delegating the task.
        agent_branch: Optional agent tree branch for logging.
        response_model: Optional Pydantic model for structured outputs.
        turn_number: Optional turn number for multi-turn conversations.
        updates: Update mechanism config from A2AConfig.updates.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        skill_id: Optional skill ID to target a specific agent capability.
        client_extensions: A2A protocol extension URIs the client supports.
        transport: Transport configuration (preferred/supported transports, gRPC settings).
        accepted_output_modes: MIME types the client can accept in responses.
        input_files: Optional dictionary of files to send to remote agent.

    Returns:
        TaskStateResult with status, result/error, history, and agent_card.
    """
    coro = aexecute_a2a_delegation(
        endpoint=endpoint,
        auth=auth,
        timeout=timeout,
        task_description=task_description,
        context=context,
        context_id=context_id,
        task_id=task_id,
        reference_task_ids=reference_task_ids,
        metadata=metadata,
        extensions=extensions,
        conversation_history=conversation_history,
        agent_id=agent_id,
        agent_role=agent_role,
        agent_branch=agent_branch,
        response_model=response_model,
        turn_number=turn_number,
        updates=updates,
        from_task=from_task,
        from_agent=from_agent,
        skill_id=skill_id,
        client_extensions=client_extensions,
        transport=transport,
        accepted_output_modes=accepted_output_modes,
        input_files=input_files,
    )
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop in this thread: safe to drive the coroutine directly.
        return asyncio.run(coro)
    # A loop is already running here; run the coroutine on a worker thread,
    # propagating the current contextvars so downstream code sees them.
    ctx = contextvars.copy_context()
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(ctx.run, asyncio.run, coro).result()
async def aexecute_a2a_delegation(
    endpoint: str,
    auth: ClientAuthScheme | None,
    timeout: int,
    task_description: str,
    context: str | None = None,
    context_id: str | None = None,
    task_id: str | None = None,
    reference_task_ids: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    extensions: dict[str, Any] | None = None,
    conversation_history: list[Message] | None = None,
    agent_id: str | None = None,
    agent_role: Role | None = None,
    agent_branch: Any | None = None,
    response_model: type[BaseModel] | None = None,
    turn_number: int | None = None,
    updates: UpdateConfig | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
    client_extensions: list[str] | None = None,
    transport: ClientTransportConfig | None = None,
    accepted_output_modes: list[str] | None = None,
    input_files: dict[str, Any] | None = None,
) -> TaskStateResult:
    """Execute a task delegation to a remote A2A agent asynchronously.

    Native async implementation with multi-turn support. Use this when running
    in an async context (e.g., with Crew.akickoff() or agent.aexecute_task()).

    Always emits an A2ADelegationCompletedEvent on the event bus: a "failed"
    event (and re-raise) when the delegation raises, otherwise an event built
    from the returned TaskStateResult.

    Args:
        endpoint: A2A agent endpoint URL.
        auth: Optional ClientAuthScheme for authentication.
        timeout: Request timeout in seconds.
        task_description: The task to delegate.
        context: Optional context information.
        context_id: Context ID for correlating messages/tasks.
        task_id: Specific task identifier.
        reference_task_ids: List of related task IDs.
        metadata: Additional metadata.
        extensions: Protocol extensions for custom fields.
        conversation_history: Previous Message objects from conversation.
        agent_id: Agent identifier for logging.
        agent_role: Role of the CrewAI agent delegating the task.
        agent_branch: Optional agent tree branch for logging.
        response_model: Optional Pydantic model for structured outputs.
        turn_number: Optional turn number for multi-turn conversations.
        updates: Update mechanism config from A2AConfig.updates.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        skill_id: Optional skill ID to target a specific agent capability.
        client_extensions: A2A protocol extension URIs the client supports.
        transport: Transport configuration (preferred, supported transports, gRPC settings).
        accepted_output_modes: MIME types the client can accept in responses.
        input_files: Optional dictionary of files to send to remote agent.

    Returns:
        TaskStateResult with status, result/error, history, and agent_card.
    """
    if conversation_history is None:
        conversation_history = []
    # A non-empty history means this call continues an earlier exchange.
    is_multiturn = len(conversation_history) > 0
    if turn_number is None:
        # Turn number = user messages already sent, plus the one being sent now.
        turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1
    try:
        result = await _aexecute_a2a_delegation_impl(
            endpoint=endpoint,
            auth=auth,
            timeout=timeout,
            task_description=task_description,
            context=context,
            context_id=context_id,
            task_id=task_id,
            reference_task_ids=reference_task_ids,
            metadata=metadata,
            extensions=extensions,
            conversation_history=conversation_history,
            is_multiturn=is_multiturn,
            turn_number=turn_number,
            agent_branch=agent_branch,
            agent_id=agent_id,
            agent_role=agent_role,
            response_model=response_model,
            updates=updates,
            from_task=from_task,
            from_agent=from_agent,
            skill_id=skill_id,
            client_extensions=client_extensions,
            transport=transport,
            accepted_output_modes=accepted_output_modes,
            input_files=input_files,
        )
    except Exception as e:
        # Surface the failure on the event bus before propagating it.
        crewai_event_bus.emit(
            agent_branch,
            A2ADelegationCompletedEvent(
                status="failed",
                result=None,
                error=str(e),
                context_id=context_id,
                is_multiturn=is_multiturn,
                endpoint=endpoint,
                metadata=metadata,
                extensions=list(extensions.keys()) if extensions else None,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        raise
    agent_card_data = result.get("agent_card")
    # Publish the completion event, enriched with remote agent-card details.
    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationCompletedEvent(
            status=result["status"],
            result=result.get("result"),
            error=result.get("error"),
            context_id=context_id,
            is_multiturn=is_multiturn,
            endpoint=endpoint,
            a2a_agent_name=result.get("a2a_agent_name"),
            agent_card=agent_card_data,
            provider=agent_card_data.get("provider") if agent_card_data else None,
            metadata=metadata,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )
    return result
async def _aexecute_a2a_delegation_impl(
    endpoint: str,
    auth: ClientAuthScheme | None,
    timeout: int,
    task_description: str,
    context: str | None,
    context_id: str | None,
    task_id: str | None,
    reference_task_ids: list[str] | None,
    metadata: dict[str, Any] | None,
    extensions: dict[str, Any] | None,
    conversation_history: list[Message],
    is_multiturn: bool,
    turn_number: int,
    agent_branch: Any | None,
    agent_id: str | None,
    agent_role: str | None,  # NOTE(review): callers pass Role | None — annotation looks stale; confirm
    response_model: type[BaseModel] | None,
    updates: UpdateConfig | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
    client_extensions: list[str] | None = None,
    transport: ClientTransportConfig | None = None,
    accepted_output_modes: list[str] | None = None,
    input_files: dict[str, Any] | None = None,
) -> TaskStateResult:
    """Internal async implementation of A2A delegation.

    Pipeline: resolve auth and fetch the remote agent card, validate auth
    and required extensions, negotiate transport and content types, emit
    start events, build the outgoing Message, then run the selected update
    handler against a freshly configured A2A client.
    """
    if transport is None:
        transport = ClientTransportConfig()
    # Compute a cache key for this auth configuration. Private token state is
    # excluded so functionally identical configs hash the same.
    if auth:
        auth_data = auth.model_dump_json(
            exclude={
                "_access_token",
                "_token_expires_at",
                "_refresh_token",
                "_authorization_callback",
            }
        )
        auth_hash = _auth_store.compute_key(type(auth).__name__, auth_data)
    else:
        auth_hash = _auth_store.compute_key("none", endpoint)
    _auth_store.set(auth_hash, auth)
    # Agent card fetches are cached per (endpoint, auth) combination.
    agent_card = await _afetch_agent_card_cached(
        endpoint=endpoint, auth_hash=auth_hash, timeout=timeout
    )
    validate_auth_against_agent_card(agent_card, auth)
    unsupported_exts = validate_required_extensions(agent_card, client_extensions)
    if unsupported_exts:
        ext_uris = [ext.uri for ext in unsupported_exts]
        raise ValueError(
            f"Agent requires extensions not supported by client: {ext_uris}"
        )
    # Transport negotiation: on failure we fall back to the client's
    # preferred transport (or JSONRPC) at the original endpoint.
    negotiated: NegotiatedTransport | None = None
    effective_transport: TransportType = transport.preferred or _DEFAULT_TRANSPORT
    effective_url = endpoint
    client_transports: list[str] = (
        list(transport.supported) if transport.supported else [_DEFAULT_TRANSPORT]
    )
    try:
        negotiated = negotiate_transport(
            agent_card=agent_card,
            client_supported_transports=client_transports,
            client_preferred_transport=transport.preferred,
            endpoint=endpoint,
            a2a_agent_name=agent_card.name,
        )
        effective_transport = negotiated.transport  # type: ignore[assignment]
        effective_url = negotiated.url
    except TransportNegotiationError as e:
        logger.warning(
            "Transport negotiation failed, using fallback",
            extra={
                "error": str(e),
                "fallback_transport": effective_transport,
                "fallback_url": effective_url,
                "endpoint": endpoint,
                "client_transports": client_transports,
                "server_transports": [
                    iface.transport for iface in agent_card.additional_interfaces or []
                ]
                + [agent_card.preferred_transport or "JSONRPC"],
            },
        )
    # Content-type negotiation; the negotiated modes override the defaults
    # only when negotiation produced a non-empty result.
    effective_output_modes = accepted_output_modes or DEFAULT_CLIENT_OUTPUT_MODES.copy()
    content_negotiated = negotiate_content_types(
        agent_card=agent_card,
        client_output_modes=accepted_output_modes,
        skill_name=skill_id,
        endpoint=endpoint,
        a2a_agent_name=agent_card.name,
    )
    if content_negotiated.output_modes:
        effective_output_modes = content_negotiated.output_modes
    headers, _ = await _prepare_auth_headers(auth, timeout)
    a2a_agent_name = None
    if agent_card.name:
        a2a_agent_name = agent_card.name
    agent_card_dict = agent_card.model_dump(exclude_none=True)
    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationStartedEvent(
            endpoint=endpoint,
            task_description=task_description,
            agent_id=agent_id or endpoint,
            context_id=context_id,
            is_multiturn=is_multiturn,
            turn_number=turn_number,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card_dict,
            protocol_version=agent_card.protocol_version,
            provider=agent_card_dict.get("provider"),
            skill_id=skill_id,
            metadata=metadata,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )
    # The conversation-started event is only emitted for the first turn.
    if turn_number == 1:
        agent_id_for_event = agent_id or endpoint
        crewai_event_bus.emit(
            agent_branch,
            A2AConversationStartedEvent(
                agent_id=agent_id_for_event,
                endpoint=endpoint,
                context_id=context_id,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card_dict,
                protocol_version=agent_card.protocol_version,
                provider=agent_card_dict.get("provider"),
                skill_id=skill_id,
                reference_task_ids=reference_task_ids,
                metadata=metadata,
                extensions=list(extensions.keys()) if extensions else None,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
    # Compose the outgoing message text: optional context block + task.
    message_parts = []
    if context:
        message_parts.append(f"Context:\n{context}\n\n")
    message_parts.append(f"{task_description}")
    message_text = "".join(message_parts)
    # Continue the task started in turn one when no explicit task_id is given.
    if is_multiturn and conversation_history and not task_id:
        if first_task_id := conversation_history[0].task_id:
            task_id = first_task_id
    parts: PartsDict = {"text": message_text}
    if response_model:
        # Attach the requested output schema so the remote agent can return
        # structured JSON.
        parts.update(
            {
                "metadata": PartsMetadataDict(
                    mimeType="application/json",
                    schema=response_model.model_json_schema(),
                )
            }
        )
    message_metadata = metadata.copy() if metadata else {}
    if skill_id:
        message_metadata["skill_id"] = skill_id
    parts_list: list[Part] = [Part(root=TextPart(**parts))]
    parts_list.extend(_create_file_parts(input_files))
    message = Message(
        role=Role.user,
        message_id=str(uuid.uuid4()),
        parts=parts_list,
        context_id=context_id,
        task_id=task_id,
        reference_task_ids=reference_task_ids,
        metadata=message_metadata if message_metadata else None,
        extensions=extensions,
    )
    new_messages: list[Message] = [*conversation_history, message]
    crewai_event_bus.emit(
        None,
        A2AMessageSentEvent(
            message=message_text,
            turn_number=turn_number,
            context_id=context_id,
            message_id=message.message_id,
            is_multiturn=is_multiturn,
            agent_role=agent_role,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            skill_id=skill_id,
            metadata=message_metadata if message_metadata else None,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )
    # Select the update handler (streaming/polling/push) and assemble the
    # keyword arguments specific to each mechanism.
    handler = get_handler(updates)
    use_polling = isinstance(updates, PollingConfig)
    handler_kwargs: dict[str, Any] = {
        "turn_number": turn_number,
        "is_multiturn": is_multiturn,
        "agent_role": agent_role,
        "context_id": context_id,
        "task_id": task_id,
        "endpoint": endpoint,
        "agent_branch": agent_branch,
        "a2a_agent_name": a2a_agent_name,
        "from_task": from_task,
        "from_agent": from_agent,
    }
    if isinstance(updates, PollingConfig):
        handler_kwargs.update(
            {
                "polling_interval": updates.interval,
                "polling_timeout": updates.timeout or float(timeout),
                "history_length": updates.history_length,
                "max_polls": updates.max_polls,
            }
        )
    elif isinstance(updates, PushNotificationConfig):
        handler_kwargs.update(
            {
                "config": updates,
                "result_store": updates.result_store,
                "polling_timeout": updates.timeout or float(timeout),
                "polling_interval": updates.interval,
            }
        )
    push_config_for_client = (
        updates if isinstance(updates, PushNotificationConfig) else None
    )
    # Streaming is only used when neither polling nor push is configured.
    use_streaming = not use_polling and push_config_for_client is None
    # Point the client at the negotiated URL when it differs from the card's.
    client_agent_card = agent_card
    if effective_url != agent_card.url:
        client_agent_card = agent_card.model_copy(update={"url": effective_url})
    async with _create_a2a_client(
        agent_card=client_agent_card,
        transport_protocol=effective_transport,
        timeout=timeout,
        headers=headers,
        streaming=use_streaming,
        auth=auth,
        use_polling=use_polling,
        push_notification_config=push_config_for_client,
        client_extensions=client_extensions,
        accepted_output_modes=effective_output_modes,  # type: ignore[arg-type]
        grpc_config=transport.grpc,
    ) as client:
        result = await handler.execute(
            client=client,
            message=message,
            new_messages=new_messages,
            agent_card=agent_card,
            **handler_kwargs,
        )
    result["a2a_agent_name"] = a2a_agent_name
    result["agent_card"] = agent_card.model_dump(exclude_none=True)
    return result
def _normalize_grpc_metadata(
metadata: tuple[tuple[str, str], ...] | None,
) -> tuple[tuple[str, str], ...] | None:
"""Lowercase all gRPC metadata keys.
gRPC requires lowercase metadata keys, but some libraries (like the A2A SDK)
use mixed-case headers like 'X-A2A-Extensions'. This normalizes them.
"""
if metadata is None:
return None
return tuple((key.lower(), value) for key, value in metadata)
def _create_grpc_interceptors(
    auth_metadata: list[tuple[str, str]] | None = None,
) -> list[Any]:
    """Create gRPC interceptors for metadata normalization and auth injection.

    Args:
        auth_metadata: Optional auth metadata to inject into all calls.
            Used for insecure channels that need auth (non-localhost without TLS).

    Returns a list of interceptors (one per gRPC call shape) that lowercase
    metadata keys for gRPC compatibility. Must be called after grpc is
    imported.
    """
    # Imported lazily so this module loads without grpcio installed.
    import grpc.aio  # type: ignore[import-untyped]
    def _merge_metadata(
        existing: tuple[tuple[str, str], ...] | None,
        auth: list[tuple[str, str]] | None,
    ) -> tuple[tuple[str, str], ...] | None:
        """Merge existing metadata with auth metadata and normalize keys."""
        merged: list[tuple[str, str]] = []
        if existing:
            merged.extend(existing)
        if auth:
            merged.extend(auth)
        if not merged:
            return None
        # gRPC rejects uppercase metadata keys, so lowercase everything here.
        return tuple((key.lower(), value) for key, value in merged)
    def _inject_metadata(client_call_details: Any) -> Any:
        """Inject merged metadata into call details."""
        # Call details are a namedtuple-like object; _replace yields a copy
        # with the normalized metadata swapped in.
        return client_call_details._replace(
            metadata=_merge_metadata(client_call_details.metadata, auth_metadata)
        )
    class MetadataUnaryUnary(grpc.aio.UnaryUnaryClientInterceptor):  # type: ignore[misc,no-any-unimported]
        """Interceptor for unary-unary calls that injects auth metadata."""
        async def intercept_unary_unary(  # type: ignore[no-untyped-def]
            self, continuation, client_call_details, request
        ):
            """Intercept unary-unary call and inject metadata."""
            return await continuation(_inject_metadata(client_call_details), request)
    class MetadataUnaryStream(grpc.aio.UnaryStreamClientInterceptor):  # type: ignore[misc,no-any-unimported]
        """Interceptor for unary-stream calls that injects auth metadata."""
        async def intercept_unary_stream(  # type: ignore[no-untyped-def]
            self, continuation, client_call_details, request
        ):
            """Intercept unary-stream call and inject metadata."""
            return await continuation(_inject_metadata(client_call_details), request)
    class MetadataStreamUnary(grpc.aio.StreamUnaryClientInterceptor):  # type: ignore[misc,no-any-unimported]
        """Interceptor for stream-unary calls that injects auth metadata."""
        async def intercept_stream_unary(  # type: ignore[no-untyped-def]
            self, continuation, client_call_details, request_iterator
        ):
            """Intercept stream-unary call and inject metadata."""
            return await continuation(
                _inject_metadata(client_call_details), request_iterator
            )
    class MetadataStreamStream(grpc.aio.StreamStreamClientInterceptor):  # type: ignore[misc,no-any-unimported]
        """Interceptor for stream-stream calls that injects auth metadata."""
        async def intercept_stream_stream(  # type: ignore[no-untyped-def]
            self, continuation, client_call_details, request_iterator
        ):
            """Intercept stream-stream call and inject metadata."""
            return await continuation(
                _inject_metadata(client_call_details), request_iterator
            )
    # One interceptor per call shape so every RPC kind gets normalized metadata.
    return [
        MetadataUnaryUnary(),
        MetadataUnaryStream(),
        MetadataStreamUnary(),
        MetadataStreamStream(),
    ]
def _create_grpc_channel_factory(
    grpc_config: GRPCClientConfig,
    auth: ClientAuthScheme | None = None,
) -> Callable[[str], Any]:
    """Create a gRPC channel factory with the given configuration.

    Args:
        grpc_config: gRPC client configuration with channel options.
        auth: Optional ClientAuthScheme for TLS and auth configuration.

    Returns:
        A callable that creates gRPC channels from URLs.

    Raises:
        ImportError: If grpcio is not installed.
        ValueError: If the auth scheme cannot be expressed over gRPC
            (HTTPDigestAuth, or APIKeyAuth in query/cookie locations).
    """
    try:
        import grpc
    except ImportError as e:
        raise ImportError(
            "gRPC transport requires grpcio. Install with: pip install a2a-sdk[grpc]"
        ) from e
    # Translate the HTTP-oriented auth scheme into gRPC metadata pairs.
    auth_metadata: list[tuple[str, str]] = []
    if auth is not None:
        from crewai.a2a.auth.client_schemes import (
            APIKeyAuth,
            BearerTokenAuth,
            HTTPBasicAuth,
            HTTPDigestAuth,
            OAuth2AuthorizationCode,
            OAuth2ClientCredentials,
        )
        # Digest auth needs an HTTP challenge-response round trip, which
        # has no gRPC equivalent.
        if isinstance(auth, HTTPDigestAuth):
            raise ValueError(
                "HTTPDigestAuth is not supported with gRPC transport. "
                "Digest authentication requires HTTP challenge-response flow. "
                "Use BearerTokenAuth, HTTPBasicAuth, APIKeyAuth (header), or OAuth2 instead."
            )
        if isinstance(auth, APIKeyAuth) and auth.location in ("query", "cookie"):
            raise ValueError(
                f"APIKeyAuth with location='{auth.location}' is not supported with gRPC transport. "
                "gRPC only supports header-based authentication. "
                "Use APIKeyAuth with location='header' instead."
            )
        if isinstance(auth, BearerTokenAuth):
            auth_metadata.append(("authorization", f"Bearer {auth.token}"))
        elif isinstance(auth, HTTPBasicAuth):
            import base64
            basic_credentials = f"{auth.username}:{auth.password}"
            encoded = base64.b64encode(basic_credentials.encode()).decode()
            auth_metadata.append(("authorization", f"Basic {encoded}"))
        elif isinstance(auth, APIKeyAuth) and auth.location == "header":
            # gRPC metadata keys must be lowercase.
            header_name = auth.name.lower()
            auth_metadata.append((header_name, auth.api_key))
        elif isinstance(auth, (OAuth2ClientCredentials, OAuth2AuthorizationCode)):
            # Only a previously acquired token can be forwarded here; no
            # token refresh happens at channel-creation time.
            if auth._access_token:
                auth_metadata.append(("authorization", f"Bearer {auth._access_token}"))
    def factory(url: str) -> Any:
        """Create a gRPC channel for the given URL."""
        # Strip the scheme to get the gRPC target; grpcs/https imply TLS.
        target = url
        use_tls = False
        if url.startswith("grpcs://"):
            target = url[8:]
            use_tls = True
        elif url.startswith("grpc://"):
            target = url[7:]
        elif url.startswith("https://"):
            target = url[8:]
            use_tls = True
        elif url.startswith("http://"):
            target = url[7:]
        # Only explicitly configured limits/keepalives become channel options.
        options: list[tuple[str, Any]] = []
        if grpc_config.max_send_message_length is not None:
            options.append(
                ("grpc.max_send_message_length", grpc_config.max_send_message_length)
            )
        if grpc_config.max_receive_message_length is not None:
            options.append(
                (
                    "grpc.max_receive_message_length",
                    grpc_config.max_receive_message_length,
                )
            )
        if grpc_config.keepalive_time_ms is not None:
            options.append(("grpc.keepalive_time_ms", grpc_config.keepalive_time_ms))
        if grpc_config.keepalive_timeout_ms is not None:
            options.append(
                ("grpc.keepalive_timeout_ms", grpc_config.keepalive_timeout_ms)
            )
        # Custom TLS from the auth scheme wins over plain TLS inferred from
        # the URL scheme.
        channel_credentials = None
        if auth and hasattr(auth, "tls") and auth.tls:
            channel_credentials = auth.tls.get_grpc_credentials()
        elif use_tls:
            channel_credentials = grpc.ssl_channel_credentials()
        if channel_credentials and auth_metadata:
            # Secure channel with auth: attach credentials via a call-metadata
            # plugin composed with the channel credentials.
            class AuthMetadataPlugin(grpc.AuthMetadataPlugin):  # type: ignore[misc,no-any-unimported]
                """gRPC auth metadata plugin that adds auth headers as metadata."""
                def __init__(self, metadata: list[tuple[str, str]]) -> None:
                    self._metadata = tuple(metadata)
                def __call__(  # type: ignore[no-any-unimported]
                    self,
                    context: grpc.AuthMetadataContext,
                    callback: grpc.AuthMetadataPluginCallback,
                ) -> None:
                    callback(self._metadata, None)
            call_creds = grpc.metadata_call_credentials(
                AuthMetadataPlugin(auth_metadata)
            )
            credentials = grpc.composite_channel_credentials(
                channel_credentials, call_creds
            )
            interceptors = _create_grpc_interceptors()
            return grpc.aio.secure_channel(
                target, credentials, options=options or None, interceptors=interceptors
            )
        if channel_credentials:
            # Secure channel without auth metadata.
            interceptors = _create_grpc_interceptors()
            return grpc.aio.secure_channel(
                target,
                channel_credentials,
                options=options or None,
                interceptors=interceptors,
            )
        # Insecure channel: auth (if any) is injected by the interceptors,
        # since call credentials require a secure channel.
        interceptors = _create_grpc_interceptors(
            auth_metadata=auth_metadata if auth_metadata else None
        )
        return grpc.aio.insecure_channel(
            target, options=options or None, interceptors=interceptors
        )
    return factory
@asynccontextmanager
async def _create_a2a_client(
    agent_card: AgentCard,
    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
    timeout: int,
    headers: MutableMapping[str, str],
    streaming: bool,
    auth: ClientAuthScheme | None = None,
    use_polling: bool = False,
    push_notification_config: PushNotificationConfig | None = None,
    client_extensions: list[str] | None = None,
    accepted_output_modes: list[str] | None = None,
    grpc_config: GRPCClientConfig | None = None,
) -> AsyncIterator[Client]:
    """Create and configure an A2A client.

    The underlying httpx.AsyncClient lives for the duration of the context
    and is closed on exit.

    Args:
        agent_card: The A2A agent card.
        transport_protocol: Transport protocol to use.
        timeout: Request timeout in seconds.
        headers: HTTP headers (already with auth applied).
        streaming: Enable streaming responses.
        auth: Optional ClientAuthScheme for client configuration.
        use_polling: Enable polling mode.
        push_notification_config: Optional push notification config.
        client_extensions: A2A protocol extension URIs to declare support for.
        accepted_output_modes: MIME types the client can accept in responses.
        grpc_config: Optional gRPC client configuration.

    Yields:
        Configured A2A client instance.
    """
    verify = _get_tls_verify(auth)
    async with httpx.AsyncClient(
        timeout=timeout,
        headers=headers,
        verify=verify,
    ) as httpx_client:
        # Digest and API-key schemes hook into the httpx client directly
        # (challenge handling / per-request key placement).
        if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            configure_auth_client(auth, httpx_client)
        push_configs: list[A2APushNotificationConfig] = []
        if push_notification_config is not None:
            push_configs.append(
                A2APushNotificationConfig(
                    url=str(push_notification_config.url),
                    id=push_notification_config.id,
                    token=push_notification_config.token,
                    authentication=push_notification_config.authentication,
                )
            )
        # gRPC needs a custom channel factory; other transports ride on httpx.
        grpc_channel_factory = None
        if transport_protocol == "GRPC":
            grpc_channel_factory = _create_grpc_channel_factory(
                grpc_config or GRPCClientConfig(),
                auth=auth,
            )
        config = ClientConfig(
            httpx_client=httpx_client,
            supported_transports=[transport_protocol],
            # Polling disables streaming even if the caller asked for it.
            streaming=streaming and not use_polling,
            polling=use_polling,
            accepted_output_modes=accepted_output_modes or DEFAULT_CLIENT_OUTPUT_MODES,  # type: ignore[arg-type]
            push_notification_configs=push_configs,
            grpc_channel_factory=grpc_channel_factory,
        )
        factory = ClientFactory(config)
        client = factory.create(agent_card)
        if client_extensions:
            # Declare supported protocol extensions on every outgoing request.
            await client.add_request_middleware(ExtensionsMiddleware(client_extensions))
        yield client

View File

@@ -1,131 +0,0 @@
"""Structured JSON logging utilities for A2A module."""
from __future__ import annotations
from contextvars import ContextVar
from datetime import datetime, timezone
import json
import logging
from typing import Any
# Ambient fields (set via LogContext) merged into every formatted record.
_log_context: ContextVar[dict[str, Any] | None] = ContextVar(
    "log_context", default=None
)


class JSONFormatter(logging.Formatter):
    """Formatter that renders log records as JSON documents.

    Produces one JSON object per record with stable top-level fields
    (timestamp, level, logger, message) so log aggregators can index them.
    Ambient LogContext fields and any custom record attributes are merged in.
    """

    # Standard LogRecord attributes that must not leak into the JSON output.
    _SKIP = (
        "name",
        "msg",
        "args",
        "created",
        "filename",
        "funcName",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "exc_info",
        "exc_text",
        "thread",
        "threadName",
        "taskName",
        "message",
    )

    def format(self, record: logging.LogRecord) -> str:
        """Serialize *record* plus ambient context fields to a JSON string."""
        payload: dict[str, Any] = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }
        if record.exc_info:
            payload["exception"] = self.formatException(record.exc_info)
        ambient = _log_context.get()
        if ambient is not None:
            payload.update(ambient)
        # Well-known correlation fields win over ambient context values.
        for known in ("task_id", "context_id", "agent", "endpoint", "extension", "error"):
            if hasattr(record, known):
                payload[known] = getattr(record, known)
        # Fold in any remaining custom attributes without clobbering
        # fields that are already set.
        for key, value in record.__dict__.items():
            if key.startswith("_") or key in self._SKIP:
                continue
            if key not in payload:
                payload[key] = value
        return json.dumps(payload, default=str)
class LogContext:
    """Scope ambient structured-logging fields to a ``with`` block.

    Every record formatted while the context is active carries the given
    fields; on exit the previous context is restored.

    Example:
        with LogContext(task_id="abc", context_id="xyz"):
            logger.info("Processing task")  # Includes task_id and context_id
    """

    def __init__(self, **fields: Any) -> None:
        self._fields = fields
        self._token: Any = None

    def __enter__(self) -> LogContext:
        # Layer the new fields over whatever context is already active.
        merged = dict(_log_context.get() or {})
        merged.update(self._fields)
        self._token = _log_context.set(merged)
        return self

    def __exit__(self, *args: Any) -> None:
        _log_context.reset(self._token)
def configure_json_logging(logger_name: str = "crewai.a2a") -> None:
    """Attach a JSON stream handler to a logger, replacing existing handlers.

    Args:
        logger_name: Logger name to configure.
    """
    target = logging.getLogger(logger_name)
    # Drop whatever handlers are attached so output is not duplicated.
    for existing in list(target.handlers):
        target.removeHandler(existing)
    json_handler = logging.StreamHandler()
    json_handler.setFormatter(JSONFormatter())
    target.addHandler(json_handler)
def get_logger(name: str) -> logging.Logger:
    """Return the logger registered under *name*.

    Thin convenience wrapper so callers obtain loggers that participate in
    the module's structured JSON output once configured.

    Args:
        name: Logger name.

    Returns:
        The logging.Logger instance for *name*.
    """
    return logging.getLogger(name)

View File

@@ -1,100 +0,0 @@
"""Response model utilities for A2A agent interactions."""
from __future__ import annotations
from typing import TypeAlias
from pydantic import BaseModel, Field, create_model
from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
from crewai.types.utils import create_literals_from_strings
# Union of every A2A configuration variant (combined, server-only, client-only).
A2AConfigTypes: TypeAlias = A2AConfig | A2AServerConfig | A2AClientConfig
# Variants usable as delegation clients; both expose an ``endpoint`` attribute.
A2AClientConfigTypes: TypeAlias = A2AConfig | A2AClientConfig
def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel] | None:
    """Build a dynamic AgentResponse model constrained to the known agent IDs.

    Args:
        agent_ids: List of available A2A agent IDs.

    Returns:
        Dynamically created Pydantic model with Literal-constrained a2a_ids field,
        or None if agent_ids is empty.
    """
    if not agent_ids:
        return None
    IdLiteral = create_literals_from_strings(agent_ids)  # noqa: N806
    field_specs = {
        "a2a_ids": (
            tuple[IdLiteral, ...],  # type: ignore[valid-type]
            Field(
                default_factory=tuple,
                max_length=len(agent_ids),
                description="A2A agent IDs to delegate to.",
            ),
        ),
        "message": (
            str,
            Field(
                description="The message content. If is_a2a=true, this is sent to the A2A agent. If is_a2a=false, this is your final answer ending the conversation."
            ),
        ),
        "is_a2a": (
            bool,
            Field(
                description="Set to false when the remote agent has answered your question - extract their answer and return it as your final message. Set to true ONLY if you need to ask a NEW, DIFFERENT question. NEVER repeat the same request - if the conversation history shows the agent already answered, set is_a2a=false immediately."
            ),
        ),
    }
    return create_model("AgentResponse", __base__=BaseModel, **field_specs)
def extract_a2a_agent_ids_from_config(
    a2a_config: list[A2AConfigTypes] | A2AConfigTypes | None,
) -> tuple[list[A2AClientConfigTypes], tuple[str, ...]]:
    """Extract client-side A2A configs and their endpoint IDs.

    ``A2AServerConfig`` entries are dropped because they carry no endpoint
    to delegate to.

    Args:
        a2a_config: A2A configuration (any type).

    Returns:
        Tuple of client A2A configs list and agent endpoint IDs.
    """
    if a2a_config is None:
        return [], ()
    if isinstance(a2a_config, (A2AConfig, A2AClientConfig, A2AServerConfig)):
        candidates: list[A2AConfigTypes] = [a2a_config]
    else:
        candidates = a2a_config
    client_configs: list[A2AClientConfigTypes] = [
        entry
        for entry in candidates
        if isinstance(entry, (A2AConfig, A2AClientConfig))
    ]
    endpoint_ids = tuple(entry.endpoint for entry in client_configs)
    return client_configs, endpoint_ids
def get_a2a_agents_and_response_model(
    a2a_config: list[A2AConfigTypes] | A2AConfigTypes | None,
) -> tuple[list[A2AClientConfigTypes], type[BaseModel] | None]:
    """Resolve client A2A configs together with their delegation response model.

    Args:
        a2a_config: A2A configuration (any type).

    Returns:
        Tuple of client A2A configs and response model.
    """
    client_configs, endpoint_ids = extract_a2a_agent_ids_from_config(
        a2a_config=a2a_config
    )
    response_model = create_agent_response_model(endpoint_ids)
    return client_configs, response_model

View File

@@ -1,585 +0,0 @@
"""A2A task utilities for server-side task management."""
from __future__ import annotations
import asyncio
import base64
from collections.abc import Callable, Coroutine
from datetime import datetime
from functools import wraps
import json
import logging
import os
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar, cast
from urllib.parse import urlparse
from a2a.server.agent_execution import RequestContext
from a2a.server.events import EventQueue
from a2a.types import (
Artifact,
FileWithBytes,
FileWithUri,
InternalError,
InvalidParamsError,
Message,
Part,
Task as A2ATask,
TaskState,
TaskStatus,
TaskStatusUpdateEvent,
)
from a2a.utils import (
get_data_parts,
get_file_parts,
new_agent_text_message,
new_data_artifact,
new_text_artifact,
)
from a2a.utils.errors import ServerError
from aiocache import SimpleMemoryCache, caches # type: ignore[import-untyped]
from pydantic import BaseModel
from typing_extensions import TypedDict
from crewai.a2a.utils.agent_card import _get_server_config
from crewai.a2a.utils.content_type import validate_message_parts
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AServerTaskCanceledEvent,
A2AServerTaskCompletedEvent,
A2AServerTaskFailedEvent,
A2AServerTaskStartedEvent,
)
from crewai.task import Task
from crewai.utilities.pydantic_schema_utils import create_model_from_schema
if TYPE_CHECKING:
from crewai.a2a.extensions.server import ExtensionContext, ServerExtensionRegistry
from crewai.agent import Agent
logger = logging.getLogger(__name__)
P = ParamSpec("P")
T = TypeVar("T")
class RedisCacheConfig(TypedDict, total=False):
    """Configuration for aiocache Redis backend.

    All keys are optional (``total=False``); only the keys present are
    forwarded to ``aiocache.RedisCache``.
    """

    # Dotted path of the aiocache backend class.
    cache: str
    # Redis server hostname.
    endpoint: str
    # Redis server port.
    port: int
    # Redis logical database index.
    db: int
    # Password for AUTH, when the URL carried one.
    password: str
def _parse_redis_url(url: str) -> RedisCacheConfig:
"""Parse a Redis URL into aiocache configuration.
Args:
url: Redis connection URL (e.g., redis://localhost:6379/0).
Returns:
Configuration dict for aiocache.RedisCache.
"""
parsed = urlparse(url)
config: RedisCacheConfig = {
"cache": "aiocache.RedisCache",
"endpoint": parsed.hostname or "localhost",
"port": parsed.port or 6379,
}
if parsed.path and parsed.path != "/":
try:
config["db"] = int(parsed.path.lstrip("/"))
except ValueError:
pass
if parsed.password:
config["password"] = parsed.password
return config
# Select the cache backend once at import time: Redis when REDIS_URL is set
# (enables cross-process cancellation via pub/sub), otherwise an in-process
# memory cache that the cancel watcher polls.
_redis_url = os.environ.get("REDIS_URL")
caches.set_config(
    {
        "default": _parse_redis_url(_redis_url)
        if _redis_url
        else {
            "cache": "aiocache.SimpleMemoryCache",
        }
    }
)
def cancellable(
    fn: Callable[P, Coroutine[Any, Any, T]],
) -> Callable[P, Coroutine[Any, Any, T]]:
    """Decorator that enables cancellation for A2A task execution.

    Runs a cancellation watcher concurrently with the wrapped function.
    When a cancel event is published, the execution is cancelled.

    Args:
        fn: The async function to wrap.

    Returns:
        Wrapped function with cancellation support.
    """

    @wraps(fn)
    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
        """Wrap function with cancellation monitoring."""
        # Locate the RequestContext positionally first, then by keyword;
        # without one there is no task_id to watch, so run unwrapped.
        context: RequestContext | None = None
        for arg in args:
            if isinstance(arg, RequestContext):
                context = arg
                break
        if context is None:
            context = cast(RequestContext | None, kwargs.get("context"))
        if context is None:
            return await fn(*args, **kwargs)
        task_id = context.task_id
        cache = caches.get("default")

        async def poll_for_cancel() -> bool:
            """Poll cache for cancellation flag."""
            while True:
                if await cache.get(f"cancel:{task_id}"):
                    return True
                await asyncio.sleep(0.1)

        async def watch_for_cancel() -> bool:
            """Watch for cancellation events via pub/sub or polling."""
            # The in-memory cache has no pub/sub, so polling is the only option.
            if isinstance(cache, SimpleMemoryCache):
                return await poll_for_cancel()
            try:
                client = cache.client
                pubsub = client.pubsub()
                await pubsub.subscribe(f"cancel:{task_id}")
                async for message in pubsub.listen():
                    if message["type"] == "message":
                        return True
            except (OSError, ConnectionError) as e:
                # Redis connectivity problems degrade to polling rather than
                # silently disabling cancellation.
                logger.warning(
                    "Cancel watcher Redis error, falling back to polling",
                    extra={"task_id": task_id, "error": str(e)},
                )
                return await poll_for_cancel()
            return False

        execute_task = asyncio.create_task(fn(*args, **kwargs))
        cancel_watch = asyncio.create_task(watch_for_cancel())
        try:
            done, _ = await asyncio.wait(
                [execute_task, cancel_watch],
                return_when=asyncio.FIRST_COMPLETED,
            )
            if cancel_watch in done:
                # Cancellation won the race: tear down the execution task and
                # surface the cancellation to the caller.
                execute_task.cancel()
                try:
                    await execute_task
                except asyncio.CancelledError:
                    pass
                raise asyncio.CancelledError(f"Task {task_id} was cancelled")
            cancel_watch.cancel()
            return execute_task.result()
        finally:
            # Always clear the cancel flag so a reused task_id does not
            # observe a stale cancellation.
            await cache.delete(f"cancel:{task_id}")

    return wrapper
def _convert_a2a_files_to_file_inputs(
    a2a_files: list[FileWithBytes | FileWithUri],
) -> dict[str, Any]:
    """Convert a2a file parts into crewai File inputs keyed by file name.

    Args:
        a2a_files: List of FileWithBytes or FileWithUri from a2a SDK.

    Returns:
        Dictionary mapping file names to FileInput objects; empty when the
        optional crewai_files package is not installed.
    """
    try:
        from crewai_files import File, FileBytes
    except ImportError:
        logger.debug("crewai_files not installed, returning empty file dict")
        return {}
    converted: dict[str, Any] = {}
    for position, entry in enumerate(a2a_files):
        # Unnamed files get a positional placeholder name.
        fallback_name = f"file_{position}"
        if isinstance(entry, FileWithBytes):
            decoded = base64.b64decode(entry.bytes)
            source = FileBytes(data=decoded, filename=entry.name)
            converted[entry.name or fallback_name] = File(source=source)
        elif isinstance(entry, FileWithUri):
            converted[entry.name or fallback_name] = File(source=entry.uri)
    return converted
def _extract_response_schema(parts: list[Part]) -> dict[str, Any] | None:
"""Extract response schema from message parts metadata.
The client may include a JSON schema in TextPart metadata to specify
the expected response format (see delegation.py line 463).
Args:
parts: List of message parts.
Returns:
JSON schema dict if found, None otherwise.
"""
for part in parts:
if part.root.kind == "text" and part.root.metadata:
schema = part.root.metadata.get("schema")
if schema and isinstance(schema, dict):
return schema # type: ignore[no-any-return]
return None
def _create_result_artifact(
    result: Any,
    task_id: str,
) -> Artifact:
    """Wrap a task result in an artifact, preserving structured data.

    Dicts and Pydantic models become DataPart artifacts; everything else is
    stringified into a TextPart artifact.

    Args:
        result: The task execution result.
        task_id: The task ID for naming the artifact.

    Returns:
        Artifact with appropriate part type (DataPart for dict/Pydantic, TextPart for strings).
    """
    name = f"result_{task_id}"
    if isinstance(result, BaseModel):
        return new_data_artifact(name, result.model_dump())
    if isinstance(result, dict):
        return new_data_artifact(name, result)
    return new_text_artifact(name, str(result))
def _build_task_description(
user_message: str,
structured_inputs: list[dict[str, Any]],
) -> str:
"""Build task description including structured data if present.
Args:
user_message: The original user message text.
structured_inputs: List of structured data from DataParts.
Returns:
Task description with structured data appended if present.
"""
if not structured_inputs:
return user_message
structured_json = json.dumps(structured_inputs, indent=2)
return f"{user_message}\n\nStructured Data:\n{structured_json}"
async def execute(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
) -> None:
    """Execute an A2A task using a CrewAI agent, without extension hooks.

    Args:
        agent: The CrewAI agent to execute the task.
        context: The A2A request context containing the user's message.
        event_queue: The event queue for sending responses back.
    """
    await _execute_impl(
        agent,
        context,
        event_queue,
        extension_registry=None,
        extension_context=None,
    )
@cancellable
async def _execute_impl(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
    extension_registry: ServerExtensionRegistry | None,
    extension_context: ExtensionContext | None,
) -> None:
    """Internal implementation for task execution with optional extensions.

    Validates the incoming message, builds a CrewAI Task from it, runs the
    agent, and publishes the completed task (with history and a result
    artifact) on the event queue. Lifecycle events are emitted on the
    crewai event bus throughout.

    Args:
        agent: The CrewAI agent executing the task.
        context: The A2A request context with the incoming message.
        event_queue: Queue used to publish the completed task.
        extension_registry: Optional registry whose hooks run around execution.
        extension_context: Optional context passed to extension hooks.

    Raises:
        ServerError: If message parts carry unsupported content types, the
            required IDs are missing, or the agent's execution fails.
    """
    server_config = _get_server_config(agent)
    # Reject content types the server did not declare as accepted.
    if context.message and context.message.parts and server_config:
        allowed_modes = server_config.default_input_modes
        invalid_types = validate_message_parts(context.message.parts, allowed_modes)
        if invalid_types:
            raise ServerError(
                InvalidParamsError(
                    message=f"Unsupported content type(s): {', '.join(invalid_types)}. "
                    f"Supported: {', '.join(allowed_modes)}"
                )
            )
    if extension_registry and extension_context:
        await extension_registry.invoke_on_request(extension_context)
    user_message = context.get_user_input()
    response_model: type[BaseModel] | None = None
    structured_inputs: list[dict[str, Any]] = []
    a2a_files: list[FileWithBytes | FileWithUri] = []
    if context.message and context.message.parts:
        # An embedded JSON schema, when present, requests structured output.
        schema = _extract_response_schema(context.message.parts)
        if schema:
            try:
                response_model = create_model_from_schema(schema)
            except Exception as e:
                # A malformed schema downgrades to free-form output rather
                # than failing the whole task.
                logger.debug(
                    "Failed to create response model from schema",
                    extra={"error": str(e), "schema_title": schema.get("title")},
                )
        structured_inputs = get_data_parts(context.message.parts)
        a2a_files = get_file_parts(context.message.parts)
    task_id = context.task_id
    context_id = context.context_id
    if task_id is None or context_id is None:
        msg = "task_id and context_id are required"
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                task_id="",
                context_id="",
                error=msg,
                from_agent=agent,
            ),
        )
        raise ServerError(InvalidParamsError(message=msg)) from None
    task = Task(
        description=_build_task_description(user_message, structured_inputs),
        expected_output="Response to the user's request",
        agent=agent,
        response_model=response_model,
        input_files=_convert_a2a_files_to_file_inputs(a2a_files),
    )
    crewai_event_bus.emit(
        agent,
        A2AServerTaskStartedEvent(
            task_id=task_id,
            context_id=context_id,
            from_task=task,
            from_agent=agent,
        ),
    )
    try:
        result = await agent.aexecute_task(task=task, tools=agent.tools)
        if extension_registry and extension_context:
            result = await extension_registry.invoke_on_response(
                extension_context, result
            )
        result_str = str(result)
        # Echo the inbound message plus the agent's reply as task history.
        history: list[Message] = [context.message] if context.message else []
        history.append(new_agent_text_message(result_str, context_id, task_id))
        await event_queue.enqueue_event(
            A2ATask(
                id=task_id,
                context_id=context_id,
                status=TaskStatus(state=TaskState.completed),
                artifacts=[_create_result_artifact(result, task_id)],
                history=history,
            )
        )
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCompletedEvent(
                task_id=task_id,
                context_id=context_id,
                result=str(result),
                from_task=task,
                from_agent=agent,
            ),
        )
    except asyncio.CancelledError:
        # Raised by the @cancellable watcher; report and re-raise so the
        # caller observes the cancellation.
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCanceledEvent(
                task_id=task_id,
                context_id=context_id,
                from_task=task,
                from_agent=agent,
            ),
        )
        raise
    except Exception as e:
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                task_id=task_id,
                context_id=context_id,
                error=str(e),
                from_task=task,
                from_agent=agent,
            ),
        )
        raise ServerError(
            error=InternalError(message=f"Task execution failed: {e}")
        ) from e
async def execute_with_extensions(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
    extension_registry: ServerExtensionRegistry,
    extension_context: ExtensionContext,
) -> None:
    """Execute an A2A task with extension hooks applied around the run.

    Args:
        agent: The CrewAI agent to execute the task.
        context: The A2A request context containing the user's message.
        event_queue: The event queue for sending responses back.
        extension_registry: Registry of server extensions.
        extension_context: Context for extension hooks.
    """
    await _execute_impl(
        agent,
        context,
        event_queue,
        extension_registry=extension_registry,
        extension_context=extension_context,
    )
async def cancel(
    context: RequestContext,
    event_queue: EventQueue,
) -> A2ATask | None:
    """Cancel an A2A task.

    Publishes a cancel event that the cancellable decorator listens for.

    Args:
        context: The A2A request context containing task information.
        event_queue: The event queue for sending the cancellation status.

    Returns:
        The canceled task with updated status, or None when no current task
        is attached to the context.

    Raises:
        ServerError: If the context lacks a task_id or context_id.
    """
    task_id = context.task_id
    context_id = context.context_id
    if task_id is None or context_id is None:
        raise ServerError(InvalidParamsError(message="task_id and context_id required"))
    # Terminal tasks cannot be cancelled; return them unchanged.
    if context.current_task and context.current_task.status.state in (
        TaskState.completed,
        TaskState.failed,
        TaskState.canceled,
    ):
        return context.current_task
    cache = caches.get("default")
    # Set a TTL'd flag for pollers; Redis additionally gets a pub/sub
    # notification so remote watchers react without polling.
    await cache.set(f"cancel:{task_id}", True, ttl=3600)
    if not isinstance(cache, SimpleMemoryCache):
        await cache.client.publish(f"cancel:{task_id}", "cancel")
    await event_queue.enqueue_event(
        TaskStatusUpdateEvent(
            task_id=task_id,
            context_id=context_id,
            status=TaskStatus(state=TaskState.canceled),
            final=True,
        )
    )
    if context.current_task:
        context.current_task.status = TaskStatus(state=TaskState.canceled)
        return context.current_task
    return None
def list_tasks(
    tasks: list[A2ATask],
    context_id: str | None = None,
    status: TaskState | None = None,
    status_timestamp_after: datetime | None = None,
    page_size: int = 50,
    page_token: str | None = None,
    history_length: int | None = None,
    include_artifacts: bool = False,
) -> tuple[list[A2ATask], str | None, int]:
    """Filter and paginate A2A tasks.

    Provides filtering by context, status, and timestamp, along with
    cursor-based pagination. This is a pure utility function that operates
    on an in-memory list of tasks - storage retrieval is handled separately.

    Args:
        tasks: All tasks to filter.
        context_id: Filter by context ID to get tasks in a conversation.
        status: Filter by task state (e.g., completed, working).
        status_timestamp_after: Filter to tasks updated after this time.
        page_size: Maximum tasks per page (default 50).
        page_token: Base64-encoded cursor from previous response.
        history_length: Limit history messages per task (None = full history).
        include_artifacts: Whether to include task artifacts (default False).

    Returns:
        Tuple of (filtered_tasks, next_page_token, total_count).
        - filtered_tasks: Tasks matching filters, paginated and trimmed.
        - next_page_token: Token for next page, or None if no more pages.
        - total_count: Total number of tasks matching filters (before pagination).
    """
    filtered: list[A2ATask] = []
    for task in tasks:
        if context_id and task.context_id != context_id:
            continue
        if status and task.status.state != status:
            continue
        # Tasks without a timestamp are never excluded by the time filter.
        if status_timestamp_after and task.status.timestamp:
            ts = datetime.fromisoformat(task.status.timestamp.replace("Z", "+00:00"))
            if ts <= status_timestamp_after:
                continue
        filtered.append(task)

    def get_sort_key(t: A2ATask) -> tuple[bool, datetime]:
        """Sort key: timestamped tasks first (newest first), untimestamped last.

        A (has_timestamp, timestamp) tuple is used so the naive
        ``datetime.min`` placeholder is never compared against a
        timezone-aware timestamp, which would raise TypeError.
        """
        if t.status.timestamp is None:
            return (False, datetime.min)
        return (
            True,
            datetime.fromisoformat(t.status.timestamp.replace("Z", "+00:00")),
        )

    filtered.sort(key=get_sort_key, reverse=True)
    total = len(filtered)
    start = 0
    if page_token:
        # Cursor is the base64-encoded ID of the last task on the previous
        # page; resume immediately after it. Unknown or corrupt cursors fall
        # back to the first page.
        try:
            cursor_id = base64.b64decode(page_token).decode()
        except (ValueError, UnicodeDecodeError):
            cursor_id = None
        if cursor_id is not None:
            for idx, task in enumerate(filtered):
                if task.id == cursor_id:
                    start = idx + 1
                    break
    page = filtered[start : start + page_size]
    result: list[A2ATask] = []
    for task in page:
        # Copy before trimming so the caller's task objects stay untouched.
        task = task.model_copy(deep=True)
        if history_length is not None and task.history:
            task.history = task.history[-history_length:]
        if not include_artifacts:
            task.artifacts = None
        result.append(task)
    next_token: str | None = None
    if result and len(result) == page_size:
        next_token = base64.b64encode(result[-1].id.encode()).decode()
    return result, next_token, total

View File

@@ -1,215 +0,0 @@
"""Transport negotiation utilities for A2A protocol.
This module provides functionality for negotiating the transport protocol
between an A2A client and server based on their respective capabilities
and preferences.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Final, Literal
from a2a.types import AgentCard, AgentInterface
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2ATransportNegotiatedEvent
# Wire protocols defined by the A2A spec.
TransportProtocol = Literal["JSONRPC", "GRPC", "HTTP+JSON"]
# How the final transport was chosen during negotiation.
NegotiationSource = Literal["client_preferred", "server_preferred", "fallback"]
JSONRPC_TRANSPORT: Literal["JSONRPC"] = "JSONRPC"
GRPC_TRANSPORT: Literal["GRPC"] = "GRPC"
HTTP_JSON_TRANSPORT: Literal["HTTP+JSON"] = "HTTP+JSON"
# Default client-side preference order when none is configured.
DEFAULT_TRANSPORT_PREFERENCE: Final[list[TransportProtocol]] = [
    JSONRPC_TRANSPORT,
    GRPC_TRANSPORT,
    HTTP_JSON_TRANSPORT,
]
@dataclass
class NegotiatedTransport:
    """Result of transport negotiation.

    Attributes:
        transport: The negotiated transport protocol.
        url: The URL to use for this transport.
        source: How the transport was selected ('client_preferred',
            'server_preferred', or 'fallback' — see NegotiationSource).
    """

    transport: str
    url: str
    source: NegotiationSource
class TransportNegotiationError(Exception):
    """Raised when no compatible transport can be negotiated."""

    def __init__(
        self,
        client_transports: list[str],
        server_transports: list[str],
        message: str | None = None,
    ) -> None:
        """Record both sides' transports and build a descriptive message.

        Args:
            client_transports: Transports supported by the client.
            server_transports: Transports supported by the server.
            message: Optional custom error message.
        """
        self.client_transports = client_transports
        self.server_transports = server_transports
        detail = message
        if detail is None:
            detail = (
                "No compatible transport found. "
                f"Client supports: {client_transports}. "
                f"Server supports: {server_transports}."
            )
        super().__init__(detail)
def _get_server_interfaces(agent_card: AgentCard) -> list[AgentInterface]:
    """Collect every interface an AgentCard exposes.

    The primary URL (paired with the card's preferred transport, defaulting
    to JSONRPC) comes first, followed by any additional interfaces that do
    not duplicate an already-collected (url, transport) pair.

    Args:
        agent_card: The agent's card containing transport information.

    Returns:
        List of AgentInterface objects representing all available endpoints.
    """
    collected: list[AgentInterface] = [
        AgentInterface(
            transport=agent_card.preferred_transport or JSONRPC_TRANSPORT,
            url=agent_card.url,
        )
    ]
    for extra in agent_card.additional_interfaces or []:
        already_known = any(
            known.url == extra.url and known.transport == extra.transport
            for known in collected
        )
        if not already_known:
            collected.append(extra)
    return collected
def negotiate_transport(
    agent_card: AgentCard,
    client_supported_transports: list[str] | None = None,
    client_preferred_transport: str | None = None,
    emit_event: bool = True,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> NegotiatedTransport:
    """Negotiate the transport protocol between client and server.

    Compares the client's supported transports with the server's available
    interfaces to find a compatible transport and URL.

    Negotiation logic:
    1. If client_preferred_transport is set and server supports it → use it
    2. Otherwise, if server's preferred is in client's supported → use server's
    3. Otherwise, find first match from client's supported in server's interfaces

    Args:
        agent_card: The server's AgentCard with transport information.
        client_supported_transports: Transports the client can use.
            Defaults to ["JSONRPC"] if not specified.
        client_preferred_transport: Client's preferred transport. If set and
            server supports it, takes priority over server preference.
        emit_event: Whether to emit a transport negotiation event.
        endpoint: Original endpoint URL for event metadata.
        a2a_agent_name: Agent name for event metadata.

    Returns:
        NegotiatedTransport with the selected transport, URL, and source.

    Raises:
        TransportNegotiationError: If no compatible transport is found.
    """
    if client_supported_transports is None:
        client_supported_transports = [JSONRPC_TRANSPORT]
    # Transport names are matched case-insensitively; normalize once.
    client_transports = [t.upper() for t in client_supported_transports]
    client_preferred = (
        client_preferred_transport.upper() if client_preferred_transport else None
    )
    server_interfaces = _get_server_interfaces(agent_card)
    server_transports = [i.transport.upper() for i in server_interfaces]
    # First interface listed for a transport wins (the primary URL precedes
    # additional_interfaces in _get_server_interfaces).
    transport_to_interface: dict[str, AgentInterface] = {}
    for interface in server_interfaces:
        transport_upper = interface.transport.upper()
        if transport_upper not in transport_to_interface:
            transport_to_interface[transport_upper] = interface
    result: NegotiatedTransport | None = None
    # Priority 1: the client's explicit preference, when the server offers it.
    if client_preferred and client_preferred in transport_to_interface:
        interface = transport_to_interface[client_preferred]
        result = NegotiatedTransport(
            transport=interface.transport,
            url=interface.url,
            source="client_preferred",
        )
    else:
        # Priority 2: the server's preferred transport, when the client
        # supports it.
        server_preferred = (agent_card.preferred_transport or JSONRPC_TRANSPORT).upper()
        if (
            server_preferred in client_transports
            and server_preferred in transport_to_interface
        ):
            interface = transport_to_interface[server_preferred]
            result = NegotiatedTransport(
                transport=interface.transport,
                url=interface.url,
                source="server_preferred",
            )
        else:
            # Priority 3: first client-supported transport the server also
            # exposes, in the client's declared order.
            for transport in client_transports:
                if transport in transport_to_interface:
                    interface = transport_to_interface[transport]
                    result = NegotiatedTransport(
                        transport=interface.transport,
                        url=interface.url,
                        source="fallback",
                    )
                    break
    if result is None:
        raise TransportNegotiationError(
            client_transports=client_transports,
            server_transports=server_transports,
        )
    if emit_event:
        crewai_event_bus.emit(
            None,
            A2ATransportNegotiatedEvent(
                endpoint=endpoint or agent_card.url,
                a2a_agent_name=a2a_agent_name or agent_card.name,
                negotiated_transport=result.transport,
                negotiated_url=result.url,
                source=result.source,
                client_supported_transports=client_transports,
                server_supported_transports=server_transports,
                server_preferred_transport=agent_card.preferred_transport
                or JSONRPC_TRANSPORT,
                client_preferred_transport=client_preferred,
            ),
        )
    return result

File diff suppressed because it is too large Load Diff

View File

@@ -1,319 +0,0 @@
"""Cross-validate A2UI Pydantic models against vendored JSON schemas.
Ensures the two validation sources stay in sync: representative payloads
must be accepted or rejected consistently by both the Pydantic models and
the JSON schemas.
"""
from __future__ import annotations
from typing import Any
import jsonschema
import pytest
from crewai.a2a.extensions.a2ui import catalog
from crewai.a2a.extensions.a2ui.models import A2UIEvent, A2UIMessage
from crewai.a2a.extensions.a2ui.schema import load_schema
# Vendored JSON schemas the Pydantic models are cross-checked against.
SERVER_SCHEMA = load_schema("server_to_client")
CLIENT_SCHEMA = load_schema("client_to_server")
CATALOG_SCHEMA = load_schema("standard_catalog_definition")
def _json_schema_valid(schema: dict[str, Any], instance: dict[str, Any]) -> bool:
    """Return True if *instance* validates against *schema*."""
    try:
        jsonschema.validate(instance, schema)
    except jsonschema.ValidationError:
        return False
    return True
def _pydantic_valid_message(data: dict[str, Any]) -> bool:
    """Return True if *data* validates as an A2UIMessage."""
    # Broad except is deliberate: any validation failure counts as invalid.
    try:
        A2UIMessage.model_validate(data)
    except Exception:
        return False
    return True
def _pydantic_valid_event(data: dict[str, Any]) -> bool:
    """Return True if *data* validates as an A2UIEvent."""
    # Broad except is deliberate: any validation failure counts as invalid.
    try:
        A2UIEvent.model_validate(data)
    except Exception:
        return False
    return True
# ---------------------------------------------------------------------------
# Valid server-to-client payloads
# Each entry is one well-formed A2UI server message variant; both the JSON
# schema and the Pydantic models must accept every one of them.
# ---------------------------------------------------------------------------
VALID_SERVER_MESSAGES: list[dict[str, Any]] = [
    {
        "beginRendering": {
            "surfaceId": "s1",
            "root": "root-col",
        },
    },
    {
        "beginRendering": {
            "surfaceId": "s2",
            "root": "root-col",
            "catalogId": "standard (v0.8)",
            "styles": {"primaryColor": "#FF0000", "font": "Roboto"},
        },
    },
    {
        "surfaceUpdate": {
            "surfaceId": "s1",
            "components": [
                {
                    "id": "title",
                    "component": {
                        "Text": {"text": {"literalString": "Hello"}},
                    },
                },
            ],
        },
    },
    {
        "surfaceUpdate": {
            "surfaceId": "s1",
            "components": [
                {
                    "id": "weighted",
                    "weight": 2.0,
                    "component": {
                        "Column": {
                            "children": {"explicitList": ["a", "b"]},
                        },
                    },
                },
            ],
        },
    },
    {
        "dataModelUpdate": {
            "surfaceId": "s1",
            "contents": [
                {"key": "name", "valueString": "Alice"},
                {"key": "score", "valueNumber": 42},
                {"key": "active", "valueBoolean": True},
            ],
        },
    },
    {
        "dataModelUpdate": {
            "surfaceId": "s1",
            "path": "/user",
            "contents": [
                {
                    "key": "prefs",
                    "valueMap": [
                        {"key": "theme", "valueString": "dark"},
                    ],
                },
            ],
        },
    },
    {
        "deleteSurface": {"surfaceId": "s1"},
    },
]
# ---------------------------------------------------------------------------
# Invalid server-to-client payloads
# Each entry breaks the message contract in a different way; the Pydantic
# models must reject every one of them.
# ---------------------------------------------------------------------------
INVALID_SERVER_MESSAGES: list[dict[str, Any]] = [
    {},  # no message variant at all
    {"beginRendering": {"surfaceId": "s1"}},  # missing the required "root"
    {"surfaceUpdate": {"surfaceId": "s1", "components": []}},  # empty components list
    {  # two variants in one message (exactly one is expected)
        "beginRendering": {"surfaceId": "s1", "root": "r"},
        "deleteSurface": {"surfaceId": "s1"},
    },
    {"unknownType": {"surfaceId": "s1"}},  # unrecognized variant name
]
# ---------------------------------------------------------------------------
# Valid client-to-server payloads
# Well-formed A2UI client events; both validators must accept them.
# ---------------------------------------------------------------------------
VALID_CLIENT_EVENTS: list[dict[str, Any]] = [
    {
        "userAction": {
            "name": "click",
            "surfaceId": "s1",
            "sourceComponentId": "btn-1",
            "timestamp": "2026-03-12T10:00:00Z",
            "context": {},
        },
    },
    {
        "userAction": {
            "name": "submit",
            "surfaceId": "s1",
            "sourceComponentId": "btn-2",
            "timestamp": "2026-03-12T10:00:00Z",
            "context": {"field": "value"},
        },
    },
    {
        "error": {"message": "render failed", "code": 500},
    },
]
# ---------------------------------------------------------------------------
# Invalid client-to-server payloads
# Each entry breaks the event contract; the Pydantic models must reject them.
# ---------------------------------------------------------------------------
INVALID_CLIENT_EVENTS: list[dict[str, Any]] = [
    {},  # no event variant at all
    {"userAction": {"name": "click"}},  # missing required userAction fields
    {  # both userAction and error set (exactly one is expected)
        "userAction": {
            "name": "click",
            "surfaceId": "s1",
            "sourceComponentId": "btn-1",
            "timestamp": "2026-03-12T10:00:00Z",
            "context": {},
        },
        "error": {"message": "oops"},
    },
]
# ---------------------------------------------------------------------------
# Catalog component payloads (validated structurally)
# One representative, valid property set per standard-catalog component.
# ---------------------------------------------------------------------------
VALID_COMPONENTS: dict[str, dict[str, Any]] = {
    "Text": {"text": {"literalString": "hello"}, "usageHint": "h1"},
    "Image": {"url": {"path": "/img/url"}, "fit": "cover", "usageHint": "avatar"},
    "Icon": {"name": {"literalString": "home"}},
    "Video": {"url": {"literalString": "https://example.com/video.mp4"}},
    "AudioPlayer": {"url": {"literalString": "https://example.com/audio.mp3"}},
    "Row": {"children": {"explicitList": ["a", "b"]}, "distribution": "center"},
    "Column": {"children": {"template": {"componentId": "c1", "dataBinding": "/list"}}},
    "List": {"children": {"explicitList": ["x"]}, "direction": "horizontal"},
    "Card": {"child": "inner"},
    "Tabs": {"tabItems": [{"title": {"literalString": "Tab 1"}, "child": "content"}]},
    "Divider": {"axis": "horizontal"},
    "Modal": {"entryPointChild": "trigger", "contentChild": "body"},
    "Button": {"child": "label", "action": {"name": "go"}},
    "CheckBox": {"label": {"literalString": "Accept"}, "value": {"literalBoolean": False}},
    "TextField": {"label": {"literalString": "Name"}},
    "DateTimeInput": {"value": {"path": "/date"}},
    "MultipleChoice": {
        "selections": {"literalArray": ["a"]},
        "options": [{"label": {"literalString": "A"}, "value": "a"}],
    },
    "Slider": {"value": {"literalNumber": 50}, "minValue": 0, "maxValue": 100},
}
class TestServerToClientConformance:
    """Pydantic models and JSON schema must agree on server-to-client messages."""

    @pytest.mark.parametrize("payload", VALID_SERVER_MESSAGES)
    def test_valid_accepted_by_both(self, payload: dict[str, Any]) -> None:
        """A valid payload passes both validators, else they have drifted."""
        assert _json_schema_valid(SERVER_SCHEMA, payload), (
            f"JSON schema rejected valid payload: {payload}"
        )
        assert _pydantic_valid_message(payload), (
            f"Pydantic rejected valid payload: {payload}"
        )

    @pytest.mark.parametrize("payload", INVALID_SERVER_MESSAGES)
    def test_invalid_rejected_by_pydantic(self, payload: dict[str, Any]) -> None:
        """Malformed payloads must not slip through model validation."""
        assert not _pydantic_valid_message(payload), (
            f"Pydantic accepted invalid payload: {payload}"
        )
class TestClientToServerConformance:
    """Pydantic models and JSON schema must agree on client-to-server events."""

    @pytest.mark.parametrize("payload", VALID_CLIENT_EVENTS)
    def test_valid_accepted_by_both(self, payload: dict[str, Any]) -> None:
        """A valid event passes both validators, else they have drifted."""
        assert _json_schema_valid(CLIENT_SCHEMA, payload), (
            f"JSON schema rejected valid payload: {payload}"
        )
        assert _pydantic_valid_event(payload), (
            f"Pydantic rejected valid payload: {payload}"
        )

    @pytest.mark.parametrize("payload", INVALID_CLIENT_EVENTS)
    def test_invalid_rejected_by_pydantic(self, payload: dict[str, Any]) -> None:
        """Malformed events must not slip through model validation."""
        assert not _pydantic_valid_event(payload), (
            f"Pydantic accepted invalid payload: {payload}"
        )
class TestCatalogConformance:
    """Catalog component schemas and Pydantic models must define the same components."""

    def test_catalog_component_names_match(self) -> None:
        """The schema's component set equals the catalog module's registry."""
        from crewai.a2a.extensions.a2ui.catalog import STANDARD_CATALOG_COMPONENTS

        schema_components = set(CATALOG_SCHEMA["components"].keys())
        assert schema_components == STANDARD_CATALOG_COMPONENTS

    @pytest.mark.parametrize(
        "name,props",
        list(VALID_COMPONENTS.items()),
    )
    def test_valid_component_accepted_by_catalog_schema(
        self, name: str, props: dict[str, Any]
    ) -> None:
        """Each representative payload validates against its component schema."""
        component_schema = CATALOG_SCHEMA["components"][name]
        assert _json_schema_valid(component_schema, props), (
            f"Catalog schema rejected valid {name}: {props}"
        )

    @pytest.mark.parametrize(
        "name,props",
        list(VALID_COMPONENTS.items()),
    )
    def test_valid_component_accepted_by_pydantic(
        self, name: str, props: dict[str, Any]
    ) -> None:
        """Each representative payload validates against its Pydantic model."""
        model_cls = getattr(catalog, name)
        try:
            model_cls.model_validate(props)
        except Exception as exc:
            pytest.fail(f"Pydantic {name} rejected valid props: {exc}")

    def test_catalog_required_fields_match(self) -> None:
        """Required fields in the JSON schema match non-optional Pydantic fields."""
        # Compare on the wire names: a field's alias (when set) is what the
        # schema sees, not the Python attribute name.
        for comp_name, comp_schema in CATALOG_SCHEMA["components"].items():
            schema_required = set(comp_schema.get("required", []))
            model_cls = getattr(catalog, comp_name)
            pydantic_required = {
                info.alias or field_name
                for field_name, info in model_cls.model_fields.items()
                if info.is_required()
            }
            assert schema_required == pydantic_required, (
                f"{comp_name}: schema requires {schema_required}, "
                f"Pydantic requires {pydantic_required}"
            )

    def test_catalog_fields_match(self) -> None:
        """Field names in JSON schema match Pydantic model aliases."""
        for comp_name, comp_schema in CATALOG_SCHEMA["components"].items():
            schema_fields = set(comp_schema.get("properties", {}).keys())
            model_cls = getattr(catalog, comp_name)
            pydantic_fields = {
                info.alias or field_name
                for field_name, info in model_cls.model_fields.items()
            }
            assert schema_fields == pydantic_fields, (
                f"{comp_name}: schema has {schema_fields}, "
                f"Pydantic has {pydantic_fields}"
            )

View File

@@ -1,327 +0,0 @@
from __future__ import annotations
import os
import uuid
import pytest
import pytest_asyncio
from a2a.client import ClientFactory
from a2a.types import AgentCard, Message, Part, Role, TaskState, TextPart
from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.streaming.handler import StreamingHandler
A2A_TEST_ENDPOINT = os.getenv("A2A_TEST_ENDPOINT", "http://localhost:9999")
@pytest_asyncio.fixture
async def a2a_client():
    """Yield an A2A client connected to the test server; close it on teardown."""
    connected = await ClientFactory.connect(A2A_TEST_ENDPOINT)
    yield connected
    await connected.close()
@pytest.fixture
def test_message() -> Message:
    """Build a single-part user message with a trivial arithmetic question."""
    question = Part(root=TextPart(text="What is 2 + 2?"))
    return Message(
        role=Role.user,
        parts=[question],
        message_id=str(uuid.uuid4()),
    )
@pytest_asyncio.fixture
async def agent_card(a2a_client) -> AgentCard:
    """Fetch the live agent card exposed by the test server."""
    card = await a2a_client.get_card()
    return card
class TestA2AAgentCardFetching:
    """Integration tests for agent card fetching."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_fetch_agent_card(self, a2a_client) -> None:
        """The server's agent card is retrievable and advertises streaming."""
        fetched = await a2a_client.get_card()
        assert fetched is not None
        assert fetched.name == "GPT Assistant"
        assert fetched.url is not None
        assert fetched.capabilities is not None
        assert fetched.capabilities.streaming is True
class TestA2APollingIntegration:
    """Integration tests for A2A polling handler."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_polling_completes_task(
        self,
        a2a_client,
        test_message: Message,
        agent_card: AgentCard,
    ) -> None:
        """The polling handler drives the task to completion and returns its result."""
        collected: list[Message] = []
        outcome = await PollingHandler.execute(
            client=a2a_client,
            message=test_message,
            new_messages=collected,
            agent_card=agent_card,
            polling_interval=0.5,
            polling_timeout=30.0,
        )
        assert isinstance(outcome, dict)
        assert outcome["status"] == TaskState.completed
        assert outcome.get("result") is not None
        # The question is "What is 2 + 2?" — the answer must mention 4.
        assert "4" in outcome["result"]
class TestA2AStreamingIntegration:
    """Integration tests for A2A streaming handler."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_streaming_completes_task(
        self,
        a2a_client,
        test_message: Message,
        agent_card: AgentCard,
    ) -> None:
        """The streaming handler drives the task to completion and returns a result."""
        collected: list[Message] = []
        outcome = await StreamingHandler.execute(
            client=a2a_client,
            message=test_message,
            new_messages=collected,
            agent_card=agent_card,
            endpoint=agent_card.url,
        )
        assert isinstance(outcome, dict)
        assert outcome["status"] == TaskState.completed
        assert outcome.get("result") is not None
class TestA2ATaskOperations:
    """Integration tests for task operations."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_send_message_and_get_response(
        self,
        a2a_client,
        test_message: Message,
    ) -> None:
        """Sending a message yields events whose final task reaches completed.

        Tracks the most recent ``Task`` observed in the event stream and
        asserts it finished in the completed state.
        """
        from a2a.types import Task

        final_task: Task | None = None
        async for event in a2a_client.send_message(test_message):
            # Events may be tuples of varying arity; only the first element
            # can be the task. Index instead of unpacking into exactly two
            # names — `task, _ = event` would raise ValueError on any tuple
            # whose length is not 2, despite the `len(event) >= 1` guard.
            if isinstance(event, tuple) and len(event) >= 1:
                candidate = event[0]
                if isinstance(candidate, Task):
                    final_task = candidate
        assert final_task is not None
        assert final_task.id is not None
        assert final_task.status is not None
        assert final_task.status.state == TaskState.completed
class TestA2APushNotificationHandler:
    """Tests for push notification handler.

    These tests use mocks for the result store since webhook callbacks
    are incoming requests that can't be recorded with VCR.
    """

    @pytest.fixture
    def mock_agent_card(self) -> AgentCard:
        """Create a minimal valid agent card for testing."""
        from a2a.types import AgentCapabilities

        return AgentCard(
            name="Test Agent",
            description="Test agent for push notification tests",
            url="http://localhost:9999",
            version="1.0.0",
            # push_notifications must be advertised for the handler to engage.
            capabilities=AgentCapabilities(streaming=True, push_notifications=True),
            default_input_modes=["text"],
            default_output_modes=["text"],
            skills=[],
        )

    @pytest.fixture
    def mock_task(self) -> "Task":
        """Create a minimal valid task for testing (still in the working state)."""
        from a2a.types import Task, TaskStatus

        return Task(
            id="task-123",
            context_id="ctx-123",
            status=TaskStatus(state=TaskState.working),
        )

    @pytest.mark.asyncio
    async def test_push_handler_waits_for_result(
        self,
        mock_agent_card: AgentCard,
        mock_task,
    ) -> None:
        """Test that push handler waits for result from store."""
        from unittest.mock import AsyncMock, MagicMock

        from a2a.types import Task, TaskStatus
        from pydantic import AnyHttpUrl

        from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
        from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

        # The store resolves with a completed task, simulating the webhook
        # callback having already delivered the final result.
        completed_task = Task(
            id="task-123",
            context_id="ctx-123",
            status=TaskStatus(state=TaskState.completed),
            history=[],
        )
        mock_store = MagicMock()
        mock_store.wait_for_result = AsyncMock(return_value=completed_task)

        # Mimic the SDK client: send_message is an async generator yielding
        # (task, update) pairs; here a single still-working task.
        async def mock_send_message(*args, **kwargs):
            yield (mock_task, None)

        mock_client = MagicMock()
        mock_client.send_message = mock_send_message
        config = PushNotificationConfig(
            url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
            token="secret-token",
            result_store=mock_store,
        )
        test_msg = Message(
            role=Role.user,
            parts=[Part(root=TextPart(text="What is 2+2?"))],
            message_id="msg-001",
        )
        new_messages: list[Message] = []
        result = await PushNotificationHandler.execute(
            client=mock_client,
            message=test_msg,
            new_messages=new_messages,
            agent_card=mock_agent_card,
            config=config,
            result_store=mock_store,
            polling_timeout=30.0,
            polling_interval=1.0,
            endpoint=mock_agent_card.url,
        )
        # The handler must block on the store with the task id it observed
        # and the configured timeout/interval.
        mock_store.wait_for_result.assert_called_once_with(
            task_id="task-123",
            timeout=30.0,
            poll_interval=1.0,
        )
        assert result["status"] == TaskState.completed

    @pytest.mark.asyncio
    async def test_push_handler_returns_failure_on_timeout(
        self,
        mock_agent_card: AgentCard,
    ) -> None:
        """Test that push handler returns failure when result store times out."""
        from unittest.mock import AsyncMock, MagicMock

        from a2a.types import Task, TaskStatus
        from pydantic import AnyHttpUrl

        from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
        from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

        # A store that never resolves: wait_for_result returning None models a timeout.
        mock_store = MagicMock()
        mock_store.wait_for_result = AsyncMock(return_value=None)
        working_task = Task(
            id="task-456",
            context_id="ctx-456",
            status=TaskStatus(state=TaskState.working),
        )

        async def mock_send_message(*args, **kwargs):
            yield (working_task, None)

        mock_client = MagicMock()
        mock_client.send_message = mock_send_message
        config = PushNotificationConfig(
            url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
            token="token",
            result_store=mock_store,
        )
        test_msg = Message(
            role=Role.user,
            parts=[Part(root=TextPart(text="test"))],
            message_id="msg-002",
        )
        new_messages: list[Message] = []
        result = await PushNotificationHandler.execute(
            client=mock_client,
            message=test_msg,
            new_messages=new_messages,
            agent_card=mock_agent_card,
            config=config,
            result_store=mock_store,
            polling_timeout=5.0,
            polling_interval=0.5,
            endpoint=mock_agent_card.url,
        )
        # A timed-out wait surfaces as a failed task with a timeout error message.
        assert result["status"] == TaskState.failed
        assert "timeout" in result.get("error", "").lower()

    @pytest.mark.asyncio
    async def test_push_handler_requires_config(
        self,
        mock_agent_card: AgentCard,
    ) -> None:
        """Test that push handler fails gracefully without config."""
        from unittest.mock import MagicMock

        from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

        mock_client = MagicMock()
        test_msg = Message(
            role=Role.user,
            parts=[Part(root=TextPart(text="test"))],
            message_id="msg-003",
        )
        new_messages: list[Message] = []
        # No config/result_store supplied: the handler should not raise but
        # report a failed status mentioning the missing config.
        result = await PushNotificationHandler.execute(
            client=mock_client,
            message=test_msg,
            new_messages=new_messages,
            agent_card=mock_agent_card,
            endpoint=mock_agent_card.url,
        )
        assert result["status"] == TaskState.failed
        assert "config" in result.get("error", "").lower()

View File

@@ -1,325 +0,0 @@
"""Tests for A2A agent card utilities."""
from __future__ import annotations
from a2a.types import AgentCard, AgentSkill
from crewai import Agent
from crewai.a2a.config import A2AClientConfig, A2AServerConfig
from crewai.a2a.utils.agent_card import inject_a2a_server_methods
class TestInjectA2AServerMethods:
    """Tests for inject_a2a_server_methods function."""

    @staticmethod
    def _make_agent(**kwargs) -> Agent:
        """Build an agent with the standard test persona plus extra kwargs."""
        return Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            **kwargs,
        )

    def test_agent_with_server_config_gets_to_agent_card_method(self) -> None:
        """Agent with A2AServerConfig should have to_agent_card method injected."""
        agent = self._make_agent(a2a=A2AServerConfig())
        assert hasattr(agent, "to_agent_card")
        assert callable(agent.to_agent_card)

    def test_agent_without_server_config_no_injection(self) -> None:
        """Agent without A2AServerConfig should not get to_agent_card method."""
        agent = self._make_agent(a2a=A2AClientConfig(endpoint="http://example.com"))
        assert not hasattr(agent, "to_agent_card")

    def test_agent_without_a2a_no_injection(self) -> None:
        """Agent without any a2a config should not get to_agent_card method."""
        agent = self._make_agent()
        assert not hasattr(agent, "to_agent_card")

    def test_agent_with_mixed_configs_gets_injection(self) -> None:
        """Agent with list containing A2AServerConfig should get to_agent_card."""
        agent = self._make_agent(
            a2a=[
                A2AClientConfig(endpoint="http://example.com"),
                A2AServerConfig(name="My Agent"),
            ]
        )
        assert hasattr(agent, "to_agent_card")
        assert callable(agent.to_agent_card)

    def test_manual_injection_on_plain_agent(self) -> None:
        """inject_a2a_server_methods should work when called manually."""
        agent = self._make_agent()
        # Attach a server config after construction, then inject manually.
        object.__setattr__(agent, "a2a", A2AServerConfig())
        inject_a2a_server_methods(agent)
        assert hasattr(agent, "to_agent_card")
        assert callable(agent.to_agent_card)
class TestToAgentCard:
    """Tests for the injected to_agent_card method."""

    @staticmethod
    def _card(
        config: A2AServerConfig,
        *,
        role: str = "Test Agent",
        goal: str = "Test goal",
        backstory: str = "Test backstory",
        url: str = "http://localhost:8000",
    ) -> AgentCard:
        """Build an agent carrying *config* and return its agent card for *url*."""
        agent = Agent(role=role, goal=goal, backstory=backstory, a2a=config)
        return agent.to_agent_card(url)

    def test_returns_agent_card(self) -> None:
        """to_agent_card should return an AgentCard instance."""
        assert isinstance(self._card(A2AServerConfig()), AgentCard)

    def test_uses_agent_role_as_name(self) -> None:
        """AgentCard name should default to agent role."""
        card = self._card(
            A2AServerConfig(),
            role="Data Analyst",
            goal="Analyze data",
            backstory="Expert analyst",
        )
        assert card.name == "Data Analyst"

    def test_uses_server_config_name(self) -> None:
        """AgentCard name should prefer A2AServerConfig.name over role."""
        card = self._card(
            A2AServerConfig(name="Custom Agent Name"),
            role="Data Analyst",
            goal="Analyze data",
            backstory="Expert analyst",
        )
        assert card.name == "Custom Agent Name"

    def test_uses_goal_as_description(self) -> None:
        """AgentCard description should include agent goal."""
        card = self._card(
            A2AServerConfig(),
            goal="Accomplish important tasks",
            backstory="Has extensive experience",
        )
        assert "Accomplish important tasks" in card.description

    def test_uses_server_config_description(self) -> None:
        """AgentCard description should prefer A2AServerConfig.description."""
        card = self._card(
            A2AServerConfig(description="Custom description"),
            goal="Accomplish important tasks",
            backstory="Has extensive experience",
        )
        assert card.description == "Custom description"

    def test_uses_provided_url(self) -> None:
        """AgentCard url should use the provided URL."""
        card = self._card(A2AServerConfig(), url="http://my-server.com:9000")
        assert card.url == "http://my-server.com:9000"

    def test_uses_server_config_url(self) -> None:
        """AgentCard url should prefer A2AServerConfig.url over provided URL."""
        card = self._card(
            A2AServerConfig(url="http://configured-url.com"),
            url="http://fallback-url.com",
        )
        # The configured URL is normalized with a trailing slash.
        assert card.url == "http://configured-url.com/"

    def test_generates_default_skill(self) -> None:
        """AgentCard should have at least one skill based on agent role."""
        card = self._card(
            A2AServerConfig(),
            role="Research Assistant",
            goal="Help with research",
            backstory="Skilled researcher",
        )
        assert len(card.skills) >= 1
        first_skill = card.skills[0]
        assert first_skill.name == "Research Assistant"
        assert first_skill.description == "Help with research"

    def test_uses_server_config_skills(self) -> None:
        """AgentCard skills should prefer A2AServerConfig.skills."""
        custom_skill = AgentSkill(
            id="custom-skill",
            name="Custom Skill",
            description="A custom skill",
            tags=["custom"],
        )
        card = self._card(A2AServerConfig(skills=[custom_skill]))
        assert len(card.skills) == 1
        assert card.skills[0].id == "custom-skill"
        assert card.skills[0].name == "Custom Skill"

    def test_includes_custom_version(self) -> None:
        """AgentCard should include version from A2AServerConfig."""
        card = self._card(A2AServerConfig(version="2.0.0"))
        assert card.version == "2.0.0"

    def test_default_version(self) -> None:
        """AgentCard should have default version 1.0.0."""
        card = self._card(A2AServerConfig())
        assert card.version == "1.0.0"
class TestAgentCardJsonStructure:
    """Tests for the JSON structure of AgentCard."""

    @staticmethod
    def _card() -> AgentCard:
        """Return the card for a standard test agent with a default server config."""
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            a2a=A2AServerConfig(),
        )
        return agent.to_agent_card("http://localhost:8000")

    def test_json_has_required_fields(self) -> None:
        """AgentCard JSON should contain all required A2A protocol fields."""
        dumped = self._card().model_dump()
        for key in (
            "name",
            "description",
            "url",
            "version",
            "skills",
            "capabilities",
            "defaultInputModes",
            "defaultOutputModes",
        ):
            assert key in dumped

    def test_json_skills_structure(self) -> None:
        """Each skill in JSON should have required fields."""
        dumped = self._card().model_dump()
        assert len(dumped["skills"]) >= 1
        first_skill = dumped["skills"][0]
        for key in ("id", "name", "description", "tags"):
            assert key in first_skill

    def test_json_capabilities_structure(self) -> None:
        """Capabilities in JSON should have expected fields."""
        caps = self._card().model_dump()["capabilities"]
        assert "streaming" in caps
        assert "pushNotifications" in caps

    def test_json_serializable(self) -> None:
        """AgentCard should be JSON serializable."""
        payload = self._card().model_dump_json()
        assert isinstance(payload, str)
        assert "Test Agent" in payload
        assert "http://localhost:8000" in payload

    def test_json_excludes_none_values(self) -> None:
        """AgentCard JSON with exclude_none should omit None fields."""
        dumped = self._card().model_dump(exclude_none=True)
        assert "provider" not in dumped
        assert "documentationUrl" not in dumped
        assert "iconUrl" not in dumped

View File

@@ -1,375 +0,0 @@
"""Tests for A2A task utilities."""
from __future__ import annotations
import asyncio
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import pytest_asyncio
from a2a.server.agent_execution import RequestContext
from a2a.server.events import EventQueue
from a2a.types import Message, Task as A2ATask, TaskState, TaskStatus
from crewai.a2a.utils.task import cancel, cancellable, execute
@pytest.fixture
def mock_agent() -> MagicMock:
    """Create a mock CrewAI agent whose async task execution returns a fixed result."""
    stub = MagicMock()
    stub.role = "Test Agent"
    stub.tools = []
    stub.aexecute_task = AsyncMock(return_value="Task completed successfully")
    return stub
@pytest.fixture
def mock_task(mock_context: MagicMock) -> MagicMock:
    """Create a mock Task whose id mirrors the mock request context."""
    stub = MagicMock()
    stub.id = mock_context.task_id
    stub.name = "Mock Task"
    stub.description = "Mock task description"
    return stub
@pytest.fixture
def mock_context() -> MagicMock:
    """Create a mock RequestContext with fixed task/context ids and no current task."""
    ctx = MagicMock(spec=RequestContext)
    ctx.task_id = "test-task-123"
    ctx.context_id = "test-context-456"
    ctx.get_user_input.return_value = "Test user message"
    ctx.message = MagicMock(spec=Message)
    ctx.message.parts = []
    ctx.current_task = None
    return ctx
@pytest.fixture
def mock_event_queue() -> AsyncMock:
    """Create a mock EventQueue with an awaitable enqueue_event."""
    queue_stub = AsyncMock(spec=EventQueue)
    queue_stub.enqueue_event = AsyncMock()
    return queue_stub
@pytest_asyncio.fixture(autouse=True)
async def clear_cache(mock_context: MagicMock) -> None:
    """Clear cancel flag from cache before each test."""
    from aiocache import caches

    # Remove any stale cancel flag left by a previous test so the
    # cancellable decorator starts from a clean slate.
    cache = caches.get("default")
    await cache.delete(f"cancel:{mock_context.task_id}")
class TestCancellableDecorator:
    """Tests for the cancellable decorator."""

    @pytest.mark.asyncio
    async def test_executes_function_without_context(self) -> None:
        """Function executes normally when no RequestContext is provided."""
        invocations = 0

        @cancellable
        async def doubler(value: int) -> int:
            nonlocal invocations
            invocations += 1
            return value * 2

        assert await doubler(5) == 10
        assert invocations == 1

    @pytest.mark.asyncio
    async def test_executes_function_with_context(self, mock_context: MagicMock) -> None:
        """Function executes normally with RequestContext when not cancelled."""

        @cancellable
        async def worker(context: RequestContext) -> str:
            await asyncio.sleep(0.01)
            return "completed"

        assert await worker(mock_context) == "completed"

    @pytest.mark.asyncio
    async def test_cancellation_raises_cancelled_error(
        self, mock_context: MagicMock
    ) -> None:
        """Function raises CancelledError when cancel flag is set."""
        from aiocache import caches

        cache = caches.get("default")

        @cancellable
        async def never_finishes(context: RequestContext) -> str:
            await asyncio.sleep(1.0)
            return "should not reach"

        # Pre-set the cancel flag: the decorator should abort the coroutine.
        await cache.set(f"cancel:{mock_context.task_id}", True)
        with pytest.raises(asyncio.CancelledError):
            await never_finishes(mock_context)

    @pytest.mark.asyncio
    async def test_cleanup_removes_cancel_flag(self, mock_context: MagicMock) -> None:
        """Cancel flag is cleaned up after execution."""
        from aiocache import caches

        cache = caches.get("default")

        @cancellable
        async def quick(context: RequestContext) -> str:
            return "done"

        await quick(mock_context)
        assert await cache.get(f"cancel:{mock_context.task_id}") is None

    @pytest.mark.asyncio
    async def test_extracts_context_from_kwargs(self, mock_context: MagicMock) -> None:
        """Context can be passed as keyword argument."""

        @cancellable
        async def increment(value: int, context: RequestContext | None = None) -> int:
            return value + 1

        assert await increment(10, context=mock_context) == 11
class TestExecute:
    """Tests for the execute function."""

    @pytest.mark.asyncio
    async def test_successful_execution(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute completes successfully and enqueues completed task."""
        with (
            # Patch the Task constructor used inside execute() and the event
            # bus so emitted events can be inspected without side effects.
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)
            mock_agent.aexecute_task.assert_called_once()
            mock_event_queue.enqueue_event.assert_called_once()
            # Exactly two emissions expected: started + completed.
            assert mock_bus.emit.call_count == 2

    @pytest.mark.asyncio
    async def test_emits_started_event(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskStartedEvent."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)
            # emit(source, event): the event payload is the second positional arg.
            first_call = mock_bus.emit.call_args_list[0]
            event = first_call[0][1]
            assert event.type == "a2a_server_task_started"
            assert event.task_id == mock_context.task_id
            assert event.context_id == mock_context.context_id

    @pytest.mark.asyncio
    async def test_emits_completed_event(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskCompletedEvent on success."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)
            # The second emission is the terminal (completed) event.
            second_call = mock_bus.emit.call_args_list[1]
            event = second_call[0][1]
            assert event.type == "a2a_server_task_completed"
            assert event.task_id == mock_context.task_id
            assert event.result == "Task completed successfully"

    @pytest.mark.asyncio
    async def test_emits_failed_event_on_exception(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskFailedEvent on exception."""
        mock_agent.aexecute_task = AsyncMock(side_effect=ValueError("Test error"))
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            # NOTE(review): the mock raises ValueError; pytest.raises(Exception)
            # is broad and would also pass if execute() re-raised a wrapped
            # error — consider narrowing once execute's re-raise contract is
            # confirmed.
            with pytest.raises(Exception):
                await execute(mock_agent, mock_context, mock_event_queue)
            # The second emission is the terminal (failed) event.
            failed_call = mock_bus.emit.call_args_list[1]
            event = failed_call[0][1]
            assert event.type == "a2a_server_task_failed"
            assert "Test error" in event.error

    @pytest.mark.asyncio
    async def test_emits_canceled_event_on_cancellation(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskCanceledEvent on CancelledError."""
        mock_agent.aexecute_task = AsyncMock(side_effect=asyncio.CancelledError())
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            with pytest.raises(asyncio.CancelledError):
                await execute(mock_agent, mock_context, mock_event_queue)
            # The second emission is the terminal (canceled) event.
            canceled_call = mock_bus.emit.call_args_list[1]
            event = canceled_call[0][1]
            assert event.type == "a2a_server_task_canceled"
            assert event.task_id == mock_context.task_id
class TestCancel:
    """Tests for the cancel function."""

    @pytest.mark.asyncio
    async def test_sets_cancel_flag_in_cache(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel sets the cancel flag in cache."""
        from aiocache import caches

        await cancel(mock_context, mock_event_queue)
        stored = await caches.get("default").get(f"cancel:{mock_context.task_id}")
        assert stored is True

    @pytest.mark.asyncio
    async def test_enqueues_task_status_update_event(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel enqueues TaskStatusUpdateEvent with canceled state."""
        await cancel(mock_context, mock_event_queue)
        mock_event_queue.enqueue_event.assert_called_once()
        (queued,) = mock_event_queue.enqueue_event.call_args[0]
        assert queued.task_id == mock_context.task_id
        assert queued.context_id == mock_context.context_id
        assert queued.status.state == TaskState.canceled
        assert queued.final is True

    @pytest.mark.asyncio
    async def test_returns_none_when_no_current_task(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel returns None when context has no current_task."""
        mock_context.current_task = None
        assert await cancel(mock_context, mock_event_queue) is None

    @pytest.mark.asyncio
    async def test_returns_updated_task_when_current_task_exists(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel returns updated task when context has current_task."""
        running = MagicMock(spec=A2ATask)
        running.status = TaskStatus(state=TaskState.working)
        mock_context.current_task = running
        updated = await cancel(mock_context, mock_event_queue)
        assert updated is running
        assert updated.status.state == TaskState.canceled

    @pytest.mark.asyncio
    async def test_cleanup_after_cancel(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel flag persists for cancellable decorator to detect."""
        from aiocache import caches

        cache = caches.get("default")
        await cancel(mock_context, mock_event_queue)
        assert await cache.get(f"cancel:{mock_context.task_id}") is True
        # Tidy up so the flag does not leak into the next test.
        await cache.delete(f"cancel:{mock_context.task_id}")
class TestExecuteAndCancelIntegration:
    """Integration tests for execute and cancel working together."""

    @pytest.mark.asyncio
    async def test_cancel_stops_running_execute(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Calling cancel stops a running execute."""

        async def never_completes(**kwargs: Any) -> str:
            await asyncio.sleep(2.0)
            return "should not complete"

        mock_agent.aexecute_task = never_completes
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus"),
        ):
            running = asyncio.create_task(
                execute(mock_agent, mock_context, mock_event_queue)
            )
            # Give execute a moment to start before cancelling it.
            await asyncio.sleep(0.1)
            await cancel(mock_context, mock_event_queue)
            with pytest.raises(asyncio.CancelledError):
                await running

View File

@@ -1,155 +0,0 @@
"""Test trust_remote_completion_status flag in A2A wrapper."""
from unittest.mock import MagicMock, patch
import pytest
from crewai.a2a.config import A2AConfig
# a2a-sdk is an optional dependency (installed via the crewai[a2a] extra);
# record its availability so dependent tests below can be skipped when absent.
try:
    from a2a.types import Message, Role
    A2A_SDK_INSTALLED = True
except ImportError:
    A2A_SDK_INSTALLED = False
def _create_mock_agent_card(name: str = "Test", url: str = "http://test-endpoint.com/"):
"""Create a mock agent card with proper model_dump behavior."""
mock_card = MagicMock()
mock_card.name = name
mock_card.url = url
mock_card.model_dump.return_value = {"name": name, "url": url}
mock_card.model_dump_json.return_value = f'{{"name": "{name}", "url": "{url}"}}'
return mock_card
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
    """When trust_remote_completion_status=True and A2A returns completed, return result directly.

    The remote's completed result must be returned as-is (no further LLM
    turn) and delegation must happen exactly once.
    """
    # Dropped the unused `from crewai.a2a.types import AgentResponseProtocol`
    # import — it was never referenced in this test.
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai import Agent, Task

    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=True,
    )
    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )
    task = Task(description="test", expected_output="test", agent=agent)

    class MockResponse:
        # Shape consumed by _delegate_to_a2a: signals an A2A delegation request.
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})
        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }
        # This should return directly without checking LLM response
        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=lambda *args, **kwargs: "fallback",
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )
        assert result == "Done by remote"
        assert mock_execute.call_count == 1
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_false_continues_conversation():
    """When trust_remote_completion_status=False and A2A returns completed, ask server agent."""
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai import Agent, Task

    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=False,
    )
    manager = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )
    task = Task(description="test", expected_output="test", agent=manager)

    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    turns = 0

    def mock_original_fn(self, task, context, tools):
        nonlocal turns
        turns += 1
        if turns == 1:
            # Server decides to finish
            return '{"is_a2a": false, "message": "Server final answer", "a2a_ids": []}'
        return "unexpected"

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        stub_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": stub_card}, {})
        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }
        result = _delegate_to_a2a(
            self=manager,
            agent_response=MockResponse(),
            task=task,
            original_fn=mock_original_fn,
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": stub_card},
            original_task_description="test",
        )
        # Should call original_fn to get server response
        assert turns >= 1
        assert result == "Server final answer"
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_default_trust_remote_completion_status_is_false():
    """Verify that default value of trust_remote_completion_status is False."""
    config = A2AConfig(endpoint="http://test-endpoint.com")
    assert config.trust_remote_completion_status is False

View File

@@ -1,217 +0,0 @@
"""Tests for Agent.kickoff() with A2A delegation using VCR cassettes."""
from __future__ import annotations
import os
import pytest
from crewai import Agent
from crewai.a2a.config import A2AClientConfig
A2A_TEST_ENDPOINT = os.getenv(
"A2A_TEST_ENDPOINT", "http://localhost:9999/.well-known/agent-card.json"
)
class TestAgentA2AKickoff:
    """Exercise Agent.kickoff() when delegation happens over the A2A protocol."""

    @pytest.fixture
    def researcher_agent(self) -> Agent:
        """Build a research agent pointed at the local A2A test server."""
        remote = A2AClientConfig(
            endpoint=A2A_TEST_ENDPOINT,
            fail_fast=False,
            max_turns=3,  # keep test conversations short
        )
        return Agent(
            role="Research Analyst",
            goal="Find and analyze information about AI developments",
            backstory="Expert researcher with access to remote specialized agents",
            verbose=True,
            a2a=[remote],
        )

    @pytest.mark.skip(reason="VCR cassette matching issue with agent card caching")
    @pytest.mark.vcr()
    def test_agent_kickoff_delegates_to_a2a(self, researcher_agent: Agent) -> None:
        """The kickoff request should be served by the remote A2A agent."""
        output = researcher_agent.kickoff(
            "Use the remote A2A agent to find out what the current time is in New York."
        )
        assert output is not None
        assert output.raw is not None
        assert isinstance(output.raw, str)
        assert len(output.raw) > 0

    @pytest.mark.skip(reason="VCR cassette matching issue with agent card caching")
    @pytest.mark.vcr()
    def test_agent_kickoff_with_calculator_skill(
        self, researcher_agent: Agent
    ) -> None:
        """Delegating a calculation should surface the remote result."""
        output = researcher_agent.kickoff(
            "Ask the remote A2A agent to calculate 25 times 17."
        )
        assert output is not None
        assert output.raw is not None
        assert "425" in output.raw or "425.0" in output.raw

    @pytest.mark.skip(reason="VCR cassette matching issue with agent card caching")
    @pytest.mark.vcr()
    def test_agent_kickoff_with_conversation_skill(
        self, researcher_agent: Agent
    ) -> None:
        """A conversational request should yield a substantive reply."""
        output = researcher_agent.kickoff(
            "Delegate to the remote A2A agent to explain quantum computing in simple terms."
        )
        assert output is not None
        assert output.raw is not None
        assert isinstance(output.raw, str)
        assert len(output.raw) > 50  # expect a meaningful response, not a stub

    @pytest.mark.vcr()
    def test_agent_kickoff_returns_lite_agent_output(
        self, researcher_agent: Agent
    ) -> None:
        """kickoff() should produce a LiteAgentOutput with the expected fields."""
        from crewai.lite_agent_output import LiteAgentOutput

        output = researcher_agent.kickoff(
            "Use the A2A agent to tell me what time it is."
        )
        assert isinstance(output, LiteAgentOutput)
        assert output.raw is not None
        assert output.agent_role == "Research Analyst"
        assert isinstance(output.messages, list)

    @pytest.mark.skip(reason="VCR cassette matching issue with agent card caching")
    @pytest.mark.vcr()
    def test_agent_kickoff_handles_multi_turn_conversation(
        self, researcher_agent: Agent
    ) -> None:
        """Multi-turn A2A exchanges should still resolve to a final answer."""
        # A broad research question is expected to need several turns.
        output = researcher_agent.kickoff(
            "Ask the remote A2A agent about recent developments in AI agent communication protocols."
        )
        assert output is not None
        assert output.raw is not None
        # The reply should be plain text about agent protocols.
        assert isinstance(output.raw, str)

    @pytest.mark.vcr()
    def test_agent_without_a2a_works_normally(self) -> None:
        """An agent with no A2A config should answer locally as usual."""
        local_agent = Agent(
            role="Simple Assistant",
            goal="Help with basic tasks",
            backstory="A helpful assistant",
            verbose=False,
        )
        # No A2A delegation should be attempted here.
        output = local_agent.kickoff("Say hello")
        assert output is not None
        assert output.raw is not None

    @pytest.mark.vcr()
    def test_agent_kickoff_with_failed_a2a_endpoint(self) -> None:
        """An unreachable A2A endpoint should fall back to the local LLM."""
        flaky_agent = Agent(
            role="Research Analyst",
            goal="Find information",
            backstory="Expert researcher",
            verbose=False,
            a2a=[
                A2AClientConfig(
                    endpoint="http://nonexistent:9999/.well-known/agent-card.json",
                    fail_fast=False,
                )
            ],
        )
        # The connection failure must not bubble up to the caller.
        output = flaky_agent.kickoff("What is 2 + 2?")
        assert output is not None
        assert output.raw is not None

    @pytest.mark.skip(reason="VCR cassette matching issue with agent card caching")
    @pytest.mark.vcr()
    def test_agent_kickoff_with_list_messages(
        self, researcher_agent: Agent
    ) -> None:
        """kickoff() should accept a message list as well as a plain string."""
        conversation = [
            {
                "role": "user",
                "content": "Delegate to the A2A agent to find the current time in Tokyo.",
            },
        ]
        output = researcher_agent.kickoff(conversation)
        assert output is not None
        assert output.raw is not None
        assert isinstance(output.raw, str)
class TestAgentA2AKickoffAsync:
    """Exercise the async Agent.kickoff_async() path with A2A delegation."""

    @pytest.fixture
    def researcher_agent(self) -> Agent:
        """Build a research agent wired to the local A2A test server."""
        remote = A2AClientConfig(
            endpoint=A2A_TEST_ENDPOINT,
            fail_fast=False,
            max_turns=3,
        )
        return Agent(
            role="Research Analyst",
            goal="Find and analyze information",
            backstory="Expert researcher with access to remote agents",
            verbose=True,
            a2a=[remote],
        )

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_agent_kickoff_async_delegates_to_a2a(
        self, researcher_agent: Agent
    ) -> None:
        """kickoff_async() should delegate the request to the A2A server."""
        output = await researcher_agent.kickoff_async(
            "Use the remote A2A agent to calculate 10 plus 15."
        )
        assert output is not None
        assert output.raw is not None
        assert isinstance(output.raw, str)

    @pytest.mark.skip(reason="Test assertion needs fixing - not capturing final answer")
    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_agent_kickoff_async_with_calculator(
        self, researcher_agent: Agent
    ) -> None:
        """Async delegation of a calculation should return the remote answer."""
        output = await researcher_agent.kickoff_async(
            "Ask the A2A agent to calculate 100 divided by 4."
        )
        assert output is not None
        assert output.raw is not None
        assert "25" in output.raw or "25.0" in output.raw

View File

@@ -1,111 +0,0 @@
"""Test A2A wrapper is only applied when a2a is passed to Agent."""
from unittest.mock import patch
import pytest
from crewai import Agent
from crewai.a2a.config import A2AConfig
# Gate a2a-specific tests on whether the optional a2a-sdk is importable.
try:
    import a2a  # noqa: F401
except ImportError:
    A2A_SDK_INSTALLED = False
else:
    A2A_SDK_INSTALLED = True
def test_agent_without_a2a_has_no_wrapper():
    """An agent created without a2a keeps its plain execute_task method."""
    plain_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    assert plain_agent.a2a is None
    assert callable(plain_agent.execute_task)
@pytest.mark.skipif(
    True,
    reason="Requires a2a-sdk to be installed. This test verifies wrapper is applied when a2a is set.",
)
def test_agent_with_a2a_has_wrapper():
    """An agent created with a2a gets the delegation wrapper applied."""
    config = A2AConfig(
        endpoint="http://test-endpoint.com",
    )
    wrapped_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        a2a=config,
    )
    assert wrapped_agent.a2a is not None
    assert wrapped_agent.a2a.endpoint == "http://test-endpoint.com"
    assert callable(wrapped_agent.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_agent_with_a2a_creates_successfully():
    """Constructing an agent with a2a succeeds and wraps execute_task."""
    config = A2AConfig(
        endpoint="http://test-endpoint.com",
    )
    wrapped_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        a2a=config,
    )
    assert wrapped_agent.a2a is not None
    # The config normalizes the endpoint URL with a trailing slash.
    assert wrapped_agent.a2a.endpoint == "http://test-endpoint.com/"
    assert callable(wrapped_agent.execute_task)
    assert hasattr(wrapped_agent.execute_task, "__wrapped__")
def test_multiple_agents_without_a2a():
    """Several agents created without a2a all stay unwrapped."""
    first = Agent(
        role="agent 1",
        goal="test goal",
        backstory="test backstory",
    )
    second = Agent(
        role="agent 2",
        goal="test goal",
        backstory="test backstory",
    )
    assert first.a2a is None
    assert second.a2a is None
    assert callable(first.execute_task)
    assert callable(second.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_wrapper_is_applied_differently_per_instance():
    """execute_task differs between agents created with and without a2a."""
    unwrapped = Agent(
        role="agent without a2a",
        goal="test goal",
        backstory="test backstory",
    )
    wrapped = Agent(
        role="agent with a2a",
        goal="test goal",
        backstory="test backstory",
        a2a=A2AConfig(endpoint="http://test-endpoint.com"),
    )
    # The wrapped agent carries a distinct, decorated execute_task.
    assert unwrapped.execute_task.__func__ is not wrapped.execute_task.__func__
    assert not hasattr(unwrapped.execute_task, "__wrapped__")
    assert hasattr(wrapped.execute_task, "__wrapped__")

View File

@@ -1,44 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1

View File

@@ -1,126 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:16:58 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"e1e63c75-3ea0-49fb-b512-5128a2476416","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\ndata:
{\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\ndata:
{\"id\":\"e5ac2160-ae9b-4bf9-aad7-14bf0d53d6d9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"b9e14c1b-734d-4d1e-864a-e6dda5231d71\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"54bb7ff3-f2c0-4eb3-b427-bf1c8cf90832\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"0dd4d3af-f35d-409d-9462-01218e5641f9\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:16:58 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"id":"cb1e4af3-d2d0-4848-96b8-7082ee6171d1","jsonrpc":"2.0","method":"tasks/get","params":{"historyLength":100,"id":"0dd4d3af-f35d-409d-9462-01218e5641f9"}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
content-length:
- '157'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: '{"id":"cb1e4af3-d2d0-4848-96b8-7082ee6171d1","jsonrpc":"2.0","result":{"contextId":"b9e14c1b-734d-4d1e-864a-e6dda5231d71","history":[{"contextId":"b9e14c1b-734d-4d1e-864a-e6dda5231d71","kind":"message","messageId":"e1e63c75-3ea0-49fb-b512-5128a2476416","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user","taskId":"0dd4d3af-f35d-409d-9462-01218e5641f9"}],"id":"0dd4d3af-f35d-409d-9462-01218e5641f9","kind":"task","status":{"message":{"kind":"message","messageId":"54bb7ff3-f2c0-4eb3-b427-bf1c8cf90832","parts":[{"kind":"text","text":"\n[Tool:
calculator] 2 + 2 = 4\n2 + 2 equals 4."}],"role":"agent"},"state":"completed"}}}'
headers:
content-length:
- '635'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1

View File

@@ -1,90 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:02 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"8cf25b61-8884-4246-adce-fccb32e176ab","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"c145297f-7331-4835-adcc-66b51de92a2b","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\ndata:
{\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\ndata:
{\"id\":\"8cf25b61-8884-4246-adce-fccb32e176ab\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"30601267-ab3b-48ef-afc8-916c37a18651\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"25f81e3c-b7e8-48b5-a98a-4066f3637a13\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"3083d3da-4739-4f4f-a4e8-7c048ea819c1\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:17:02 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
version: 1

View File

@@ -1,90 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"streaming":true},"defaultInputModes":["text"],"defaultOutputModes":["text"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999/","version":"1.0.0"}'
headers:
content-length:
- '1198'
content-type:
- application/json
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"id":"3a17c6bf-8db6-45a6-8535-34c45c0c4936","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":[],"blocking":true},"message":{"kind":"message","messageId":"712558a3-6d92-4591-be8a-9dd8566dde82","parts":[{"kind":"text","text":"What
is 2 + 2?"}],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '301'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999/
response:
body:
string: "data: {\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\ndata:
{\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\ndata:
{\"id\":\"3a17c6bf-8db6-45a6-8535-34c45c0c4936\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ca2fbbc9-761e-45d9-a929-0c68b1f8acbf\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"916324aa-fd25-4849-bceb-c4644e2fcbb0\",\"parts\":[{\"kind\":\"text\",\"text\":\"\\n[Tool:
calculator] 2 + 2 = 4\\n2 + 2 equals 4.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"c6e88db0-36e9-4269-8b9a-ecb6dfdcf6a1\"}}\r\n\r\n"
headers:
Transfer-Encoding:
- chunked
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Tue, 06 Jan 2026 14:17:00 GMT
server:
- uvicorn
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
version: 1

View File

@@ -1,665 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the remote A2A agent to find out what the current time is in New York.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1385'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPKNZ1miu2xMURPYYcLXdKrlMIh\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808810,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time in New York?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
274,\n \"completion_tokens\": 40,\n \"total_tokens\": 314,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:31 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '854'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"869c0693-9e53-40ae-acd0-5823d73c7808","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"5c4fc5ee-97c4-42f9-96a0-f1e1205f0832","parts":[{"kind":"text","text":"What
is the current time in New York?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '364'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"869c0693-9e53-40ae-acd0-5823d73c7808\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"0aba73f6-87de-4e43-9a5a-7ebd22f590e3\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"0aca3052-dd0b-4f0e-bc0e-a646b0a86de2\"}}\r\n\r\ndata:
{\"id\":\"869c0693-9e53-40ae-acd0-5823d73c7808\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"0aba73f6-87de-4e43-9a5a-7ebd22f590e3\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"0aca3052-dd0b-4f0e-bc0e-a646b0a86de2\"}}\r\n\r\ndata:
{\"id\":\"869c0693-9e53-40ae-acd0-5823d73c7808\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"0aba73f6-87de-4e43-9a5a-7ebd22f590e3\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"f70e8fe1-26c9-47e1-9331-1be893e0c5b0\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-30 16:33:31 EST (America/New_York)\\nThe current time in
New York is 4:33 PM EST.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"0aca3052-dd0b-4f0e-bc0e-a646b0a86de2\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:30 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the remote A2A agent to find out what the current time is in New York.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1385'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPN7ipYzYuI3Htoj13LNHyic8RB\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808813,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time in New York?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
274,\n \"completion_tokens\": 40,\n \"total_tokens\": 314,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:34 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '660'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"7aac6192-3805-4a2f-b9b1-3e281f723f35","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"0a76a4cc-e497-491e-84e0-2a9d35e106b5","parts":[{"kind":"text","text":"What
is the current time in New York?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '364'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"7aac6192-3805-4a2f-b9b1-3e281f723f35\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"7a2ffd0b-8d57-4a06-8e61-614cb6132b76\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"a562b802-b554-4dce-93f1-d3b757ab1e93\"}}\r\n\r\ndata:
{\"id\":\"7aac6192-3805-4a2f-b9b1-3e281f723f35\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"7a2ffd0b-8d57-4a06-8e61-614cb6132b76\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"a562b802-b554-4dce-93f1-d3b757ab1e93\"}}\r\n\r\ndata:
{\"id\":\"7aac6192-3805-4a2f-b9b1-3e281f723f35\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"7a2ffd0b-8d57-4a06-8e61-614cb6132b76\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"9a5678a8-9fdc-47c5-b7fa-061da1bf98e1\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-30 16:33:34 EST (America/New_York)\\nThe current time in
New York is 4:33 PM EST.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"a562b802-b554-4dce-93f1-d3b757ab1e93\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:33 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the remote A2A agent to find out what the current time is in New York.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1385'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPQoeD1yHZjR5bbWnq9fMdIPMFu\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808816,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current local time in New York?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
274,\n \"completion_tokens\": 41,\n \"total_tokens\": 315,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:37 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '684'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"03e755a9-bf97-4c55-bd2f-fbc23e3385ef","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"4c7c5802-a8b0-40ac-bbf4-4ff1cb0b10f3","parts":[{"kind":"text","text":"What
is the current local time in New York?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '370'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"03e755a9-bf97-4c55-bd2f-fbc23e3385ef\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"fcef0a1d-ba1d-4703-88a7-0caddb7d8602\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"01abf50f-944a-4814-816c-170e460a542a\"}}\r\n\r\ndata:
{\"id\":\"03e755a9-bf97-4c55-bd2f-fbc23e3385ef\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"fcef0a1d-ba1d-4703-88a7-0caddb7d8602\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"01abf50f-944a-4814-816c-170e460a542a\"}}\r\n\r\ndata:
{\"id\":\"03e755a9-bf97-4c55-bd2f-fbc23e3385ef\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"fcef0a1d-ba1d-4703-88a7-0caddb7d8602\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"cc32e320-d067-4006-85ad-c3eb988ee0cc\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-30 16:33:38 EST (America/New_York)\\nThe current local time
in New York is 4:33 PM EST on January 30, 2026.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"01abf50f-944a-4814-816c-170e460a542a\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:37 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the remote A2A agent to find out what the current time is in New York.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1385'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPUBwXUl9D9xr19IR5ayWaliTQc\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808820,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time in New York?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
274,\n \"completion_tokens\": 40,\n \"total_tokens\": 314,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:41 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '844'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"pushNotifications":true,"streaming":true},"defaultInputModes":["text/plain","application/json"],"defaultOutputModes":["text/plain","application/json"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999","version":"1.0.0"}'
headers:
content-length:
- '1272'
content-type:
- application/json
date:
- Fri, 30 Jan 2026 21:49:04 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1

View File

@@ -1,744 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent about recent developments in AI agent communication
protocols.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1398'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPVb8R5TRMw6i6NIg1K1QCH37DH\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808821,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Could
you provide detailed information on the latest developments and advancements
in AI agent communication protocols? I'm particularly interested in new standards,
interoperability improvements, and innovative methods adopted recently.\\\",\\\"is_a2a\\\":true}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
271,\n \"completion_tokens\": 63,\n \"total_tokens\": 334,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:42 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1076'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"7a3da9c5-2ad7-4334-9cb8-3e45920013f3","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"a3d6ab2f-4e18-4b1c-9a5a-1d326b500dc6","parts":[{"kind":"text","text":"Could
you provide detailed information on the latest developments and advancements
in AI agent communication protocols? I''m particularly interested in new standards,
interoperability improvements, and innovative methods adopted recently."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '564'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"7a3da9c5-2ad7-4334-9cb8-3e45920013f3\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"89db77f5-1399-4c96-827b-2d4bac7f412d\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"4c4fceee-2a1b-4050-a958-56f23e9f5920\"}}\r\n\r\ndata:
{\"id\":\"7a3da9c5-2ad7-4334-9cb8-3e45920013f3\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"89db77f5-1399-4c96-827b-2d4bac7f412d\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"4c4fceee-2a1b-4050-a958-56f23e9f5920\"}}\r\n\r\ndata:
{\"id\":\"7a3da9c5-2ad7-4334-9cb8-3e45920013f3\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"89db77f5-1399-4c96-827b-2d4bac7f412d\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"b7b4db08-f796-4233-a606-44b65791928b\",\"parts\":[{\"kind\":\"text\",\"text\":\"Recent
developments in AI agent communication protocols have focused on enhancing
interoperability, establishing new standards, and introducing innovative methods.
Here are some key advancements:\\n\\n1. **New Standards**: \\n - **IEEE
P7010**: This is a standard under development that aims to establish ethical
guidelines for AI systems, particularly in how they communicate and interact
with humans and other systems. \\n - **W3C's Vocabulary for AI**: The World
Wide Web Consortium (W3C) is working on creating vocabularies and protocols
to facilitate clearer communication between AI agents across different platforms.\\n\\n2.
**Interoperability Improvements**:\\n - **API Standardization**: Efforts
like the OpenAPI Specification (formerly known as Swagger) are being adopted
for AI models, allowing better integration and communication between different
AI systems and services.\\n - **Interoperable Frameworks**: Frameworks such
as the AI Exchange (a protocol for exchanging AI models and data) are evolving
to ensure that different AI systems can work together more effectively, enhancing
data sharing and joint operations.\\n\\n3. **Innovative Methods**:\\n -
**Natural Language Understanding (NLU)**: Advances in NLU technologies have
made communication more intuitive. AI agents now better grasp context, intent,
and sentiment, allowing for more fluid interactions.\\n - **Multi-agent
Systems (MAS)**: Recent research is focusing on improving how multiple AI
agents communicate within a shared environment, utilizing protocols that allow
for collaborative problem-solving and negotiation.\\n - **Federated Learning**:
This method enhances privacy and security in communications by allowing AI
models to learn collaboratively across multiple devices or systems without
sharing personal data.\\n\\n4. **Decentralized Protocols**: The emergence
of decentralized communication protocols, such as those based on blockchain
technology, is enabling AI agents to communicate securely and transparently,
fostering trust among users and systems.\\n\\n5. **Research Collaborations**:
Initiatives like the Partnership on AI have brought together companies, academia,
and civil society to explore best practices in communication protocols that
prioritize ethical considerations.\\n\\nOverall, these advancements in standards
and protocols are aimed at creating a more interconnected and efficient environment
for AI agents, encouraging innovation while addressing ethical and interoperability
challenges.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"4c4fceee-2a1b-4050-a958-56f23e9f5920\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:42 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent about recent developments in AI agent communication
protocols.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1398'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPjIHCyZCk5jLwgIGOvDH1NvaPu\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808835,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Could
you provide an overview of the recent developments in AI agent communication
protocols? Specifically, updates or breakthroughs in how AI agents communicate
and coordinate with each other.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
271,\n \"completion_tokens\": 61,\n \"total_tokens\": 332,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:57 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1453'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"1fc46982-0250-430d-897b-d12ac939f2ae","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"fdd2c664-4eef-4b15-b62d-1ff72b18faf6","parts":[{"kind":"text","text":"Could
you provide an overview of the recent developments in AI agent communication
protocols? Specifically, updates or breakthroughs in how AI agents communicate
and coordinate with each other."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '520'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"1fc46982-0250-430d-897b-d12ac939f2ae\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"2f12f4df-96b1-41db-9c40-b345b214f107\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"105db7f0-0cc6-43f8-a224-24b6d46c01fb\"}}\r\n\r\ndata:
{\"id\":\"1fc46982-0250-430d-897b-d12ac939f2ae\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"2f12f4df-96b1-41db-9c40-b345b214f107\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"105db7f0-0cc6-43f8-a224-24b6d46c01fb\"}}\r\n\r\ndata:
{\"id\":\"1fc46982-0250-430d-897b-d12ac939f2ae\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"2f12f4df-96b1-41db-9c40-b345b214f107\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"5a60601d-90f9-452b-8356-9335daa61d6a\",\"parts\":[{\"kind\":\"text\",\"text\":\"Recent
developments in AI agent communication protocols have focused on enhancing
interoperability, efficiency, and security among AI systems. Key breakthroughs
include:\\n\\n1. **Standardization of Protocols**: Efforts such as the IEEE
P2870 standard aim to create a unified communication framework that allows
different AI agents to interact seamlessly across platforms.\\n\\n2. **Multi-agent
Reinforcement Learning (MARL)**: Innovations in MARL enable agents to learn
from each other and improve communication strategies, facilitating better
coordination in dynamic environments.\\n\\n3. **Natural Language Processing
(NLP) Enhancements**: Advancements in NLP have improved how AI agents understand
and generate human language, enabling more intuitive communication both with
humans and among themselves.\\n\\n4. **Decentralized Communication Frameworks**:
Protocols using blockchain technology allow for secure and transparent communication
between agents without a central authority, promoting trust and data integrity.\\n\\n5.
**Contextual Understanding**: New algorithms enhance AI's ability to maintain
context in conversations, allowing agents to communicate more effectively
in multi-turn dialogues and complex scenarios.\\n\\n6. **Communication Efficiency**:
Research into compressed communication techniques is helping reduce bandwidth
usage while maintaining the effectiveness of information exchange between
agents. \\n\\nThese advancements are paving the way for more autonomous and
collaborative AI systems capable of complex task execution.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"105db7f0-0cc6-43f8-a224-24b6d46c01fb\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:56 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent about recent developments in AI agent communication
protocols.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1398'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPus9YgQVHUbVZ4ytiARJMcg7wS\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808846,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Could
you please provide information about the recent developments in AI agent communication
protocols?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 271,\n \"completion_tokens\":
46,\n \"total_tokens\": 317,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:07 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '903'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"b6af80c8-d1de-4149-9093-bac40b337eba","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"8505e288-5d79-4420-88f4-79d8161cd1b5","parts":[{"kind":"text","text":"Could
you please provide information about the recent developments in AI agent communication
protocols?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '430'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"b6af80c8-d1de-4149-9093-bac40b337eba\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"3fa6f8f9-51c6-417c-8680-732f7de8b0f4\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"d1464f6c-b2b3-4f24-8918-322f152caf74\"}}\r\n\r\ndata:
{\"id\":\"b6af80c8-d1de-4149-9093-bac40b337eba\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"3fa6f8f9-51c6-417c-8680-732f7de8b0f4\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"d1464f6c-b2b3-4f24-8918-322f152caf74\"}}\r\n\r\ndata:
{\"id\":\"b6af80c8-d1de-4149-9093-bac40b337eba\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"3fa6f8f9-51c6-417c-8680-732f7de8b0f4\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"bd09b7bc-4295-4933-92b9-b2ceeab38aba\",\"parts\":[{\"kind\":\"text\",\"text\":\"Recent
developments in AI agent communication protocols have focused on enhancing
interoperability, robustness, and security among different AI systems. Key
trends include:\\n\\n1. **Standardization Efforts**: Organizations are working
towards creating standardized protocols that allow various AI agents to communicate
seamlessly, such as the use of IEEE's initiatives.\\n\\n2. **Interoperability
Frameworks**: Frameworks such as Agent Communication Language (ACL) and FIPA
standards are being updated to support richer interactions among AI systems.\\n\\n3.
**Natural Language Processing (NLP) Improvements**: Advances in NLP are enabling
agents to understand and generate human-like responses better, allowing for
more natural communication.\\n\\n4. **Context Management**: New protocols
are incorporating context-awareness so that agents can understand and adjust
their responses based on the situational context in which communication occurs.\\n\\n5.
**Security Protocol Enhancements**: With rising concerns over data privacy,
new communication protocols are integrating stronger security measures, including
encryption and authentication methods to protect the exchange of sensitive
information.\\n\\n6. **Decentralized Communication**: There's an ongoing exploration
into decentralized protocols using blockchain technology to ensure transparency
and trust in agent communications.\\n\\nThese advancements are instrumental
in creating a more connected and effective network of AI agents capable of
cooperating and collaborating across diverse applications and industries.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"d1464f6c-b2b3-4f24-8918-322f152caf74\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:07 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent about recent developments in AI agent communication
protocols.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1398'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQ6ybuZARDZhmEoVvSYKa8jh4xN\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808858,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Can
you provide the latest information on recent developments in AI agent communication
protocols? Specifically, I am interested in new standards, methods, or tools
that have emerged to enhance interoperability and efficiency among AI agents.\\\",\\\"is_a2a\\\":true}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
271,\n \"completion_tokens\": 70,\n \"total_tokens\": 341,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:19 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1228'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"pushNotifications":true,"streaming":true},"defaultInputModes":["text/plain","application/json"],"defaultOutputModes":["text/plain","application/json"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999","version":"1.0.0"}'
headers:
content-length:
- '1272'
content-type:
- application/json
date:
- Fri, 30 Jan 2026 21:49:26 GMT
server:
- uvicorn
status:
code: 200
message: OK
version: 1

View File

@@ -1,622 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the A2A agent to tell me what time it is.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1356'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qP8bKsdUDPFNPsCgA6XfTcDS5RQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808798,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
time is it currently?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 269,\n \"completion_tokens\":
37,\n \"total_tokens\": 306,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:19 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '959'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"bf41cc3b-7d95-4456-af9b-4dc0c1f3837b","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"4ae6375a-3c1f-49fd-8618-6b235be6ea5f","parts":[{"kind":"text","text":"What
time is it currently?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '353'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"bf41cc3b-7d95-4456-af9b-4dc0c1f3837b\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"5d428854-df47-4326-aa42-2ca126a4ff08\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"59cef95a-ba9d-486e-b9cb-3ce39dc95c08\"}}\r\n\r\ndata:
{\"id\":\"bf41cc3b-7d95-4456-af9b-4dc0c1f3837b\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"5d428854-df47-4326-aa42-2ca126a4ff08\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"59cef95a-ba9d-486e-b9cb-3ce39dc95c08\"}}\r\n\r\ndata:
{\"id\":\"bf41cc3b-7d95-4456-af9b-4dc0c1f3837b\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"5d428854-df47-4326-aa42-2ca126a4ff08\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"9d3b99cd-8853-4f78-8a4b-4c82e640a6a8\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-30 21:33:20 UTC (UTC)\\nThe current time is 21:33:20 UTC
on January 30, 2026.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"59cef95a-ba9d-486e-b9cb-3ce39dc95c08\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:19 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the A2A agent to tell me what time it is.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1356'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPCXrfeCIRc2rGEuz7HncDWi0mt\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808802,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
time is it currently?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 269,\n \"completion_tokens\":
37,\n \"total_tokens\": 306,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:23 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '762'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"5145c746-1044-4724-9c51-cbcb3f338fbc","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"a4f9ea5b-9ccc-48f2-b5ad-517c0137af72","parts":[{"kind":"text","text":"What
time is it currently?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '353'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"5145c746-1044-4724-9c51-cbcb3f338fbc\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"23a29826-dfe4-4c2b-bca2-8c3b69baef39\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"3ef1907c-b2b1-4b3b-a9ca-80678639aa6b\"}}\r\n\r\ndata:
{\"id\":\"5145c746-1044-4724-9c51-cbcb3f338fbc\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"23a29826-dfe4-4c2b-bca2-8c3b69baef39\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"3ef1907c-b2b1-4b3b-a9ca-80678639aa6b\"}}\r\n\r\ndata:
{\"id\":\"5145c746-1044-4724-9c51-cbcb3f338fbc\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"23a29826-dfe4-4c2b-bca2-8c3b69baef39\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"4fd468de-7e34-4e88-9635-2b13c4acca32\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-30 21:33:24 UTC (UTC)\\nThe current time is 21:33:24 UTC.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"3ef1907c-b2b1-4b3b-a9ca-80678639aa6b\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:23 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the A2A agent to tell me what time it is.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1356'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPFhtpO4QMdnHFH73Fum0rpqCi5\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808805,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 269,\n \"completion_tokens\":
37,\n \"total_tokens\": 306,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:26 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '700'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"fa7576f7-dbfb-4605-ad70-a0ecadf0f1ac","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"ff083812-cb3a-42d3-b410-7941106f68ac","parts":[{"kind":"text","text":"What
is the current time?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '352'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"fa7576f7-dbfb-4605-ad70-a0ecadf0f1ac\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"0eb501cf-ef11-4b46-b17d-639fb1a9aa3a\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"a8c1ccbf-6f13-4894-a4d2-d87d0c3b9ffb\"}}\r\n\r\ndata:
{\"id\":\"fa7576f7-dbfb-4605-ad70-a0ecadf0f1ac\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"0eb501cf-ef11-4b46-b17d-639fb1a9aa3a\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"a8c1ccbf-6f13-4894-a4d2-d87d0c3b9ffb\"}}\r\n\r\ndata:
{\"id\":\"fa7576f7-dbfb-4605-ad70-a0ecadf0f1ac\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"0eb501cf-ef11-4b46-b17d-639fb1a9aa3a\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"2a5368d0-ac8d-4c7c-b272-a711b96bf277\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-30 21:33:27 UTC (UTC)\\nThe current time is 21:33:27 UTC
on January 30, 2026.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"a8c1ccbf-6f13-4894-a4d2-d87d0c3b9ffb\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:26 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Use the A2A agent to tell me what time it is.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1356'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qPJqK3Nr9ySPgvb0LGVLS3ZGxGi\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808809,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 269,\n \"completion_tokens\":
37,\n \"total_tokens\": 306,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:30 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '877'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,662 +0,0 @@
interactions:
- request:
body: ''
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*'
accept-encoding:
- ACCEPT-ENCODING-XXX
connection:
- keep-alive
host:
- localhost:9999
method: GET
uri: http://localhost:9999/.well-known/agent-card.json
response:
body:
string: '{"capabilities":{"pushNotifications":true,"streaming":true},"defaultInputModes":["text/plain","application/json"],"defaultOutputModes":["text/plain","application/json"],"description":"An
AI assistant powered by OpenAI GPT with calculator and time tools. Ask questions,
perform calculations, or get the current time in any timezone.","name":"GPT
Assistant","preferredTransport":"JSONRPC","protocolVersion":"0.3.0","skills":[{"description":"Have
a general conversation with the AI assistant. Ask questions, get explanations,
or just chat.","examples":["Hello, how are you?","Explain quantum computing
in simple terms","What can you help me with?"],"id":"conversation","name":"General
Conversation","tags":["chat","conversation","general"]},{"description":"Perform
mathematical calculations including arithmetic, exponents, and more.","examples":["What
is 25 * 17?","Calculate 2^10","What''s (100 + 50) / 3?"],"id":"calculator","name":"Calculator","tags":["math","calculator","arithmetic"]},{"description":"Get
the current date and time in any timezone.","examples":["What time is it?","What''s
the current time in Tokyo?","What''s today''s date in New York?"],"id":"time","name":"Current
Time","tags":["time","date","timezone"]}],"url":"http://localhost:9999","version":"1.0.0"}'
headers:
content-length:
- '1272'
content-type:
- application/json
date:
- Fri, 30 Jan 2026 21:33:04 GMT
server:
- uvicorn
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent to calculate 25 times 17.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1361'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qOvSd5OuQsjAaD7aJtOwEXE0l29\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808785,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is 25 times 17?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 270,\n \"completion_tokens\":
39,\n \"total_tokens\": 309,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:05 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '697'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"ac9d0ac3-5e35-48c7-a2cd-59e2a66966dd","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"1cccc8dc-7253-4bbd-8e32-bed4004181c8","parts":[{"kind":"text","text":"What
is 25 times 17?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '347'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"ac9d0ac3-5e35-48c7-a2cd-59e2a66966dd\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"5da04daa-d638-4124-8f65-7a73627679ad\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"837a9eed-eea6-4d12-94e4-545c6869efc6\"}}\r\n\r\ndata:
{\"id\":\"ac9d0ac3-5e35-48c7-a2cd-59e2a66966dd\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"5da04daa-d638-4124-8f65-7a73627679ad\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"837a9eed-eea6-4d12-94e4-545c6869efc6\"}}\r\n\r\ndata:
{\"id\":\"ac9d0ac3-5e35-48c7-a2cd-59e2a66966dd\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"5da04daa-d638-4124-8f65-7a73627679ad\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"d01e8e15-61d1-4b3d-a356-95d3add99291\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
calculator] 25 * 17 = 425\\n25 times 17 is 425.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"837a9eed-eea6-4d12-94e4-545c6869efc6\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:04 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent to calculate 25 times 17.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1361'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qOzCessPAoHkJZsUYJIw48NwJrS\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808789,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Please
calculate 25 times 17.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 270,\n \"completion_tokens\":
38,\n \"total_tokens\": 308,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:10 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '666'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"89e94967-f28d-4639-9c9c-87719483d15d","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"6ef5d359-8604-4520-a539-518e88d75456","parts":[{"kind":"text","text":"Please
calculate 25 times 17."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '356'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"89e94967-f28d-4639-9c9c-87719483d15d\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cdd17068-acc2-462a-9e9d-bf1a2403a432\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"c96047bb-5ddf-410a-a4bd-03c6bcd4457e\"}}\r\n\r\ndata:
{\"id\":\"89e94967-f28d-4639-9c9c-87719483d15d\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cdd17068-acc2-462a-9e9d-bf1a2403a432\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"c96047bb-5ddf-410a-a4bd-03c6bcd4457e\"}}\r\n\r\ndata:
{\"id\":\"89e94967-f28d-4639-9c9c-87719483d15d\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cdd17068-acc2-462a-9e9d-bf1a2403a432\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"4138a4a4-217a-4f99-a6b1-970229a6bedd\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
calculator] 25 * 17 = 425\\nThe result of 25 times 17 is 425.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"c96047bb-5ddf-410a-a4bd-03c6bcd4457e\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:10 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent to calculate 25 times 17.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1361'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qP2eXgsxYsRuFnwu5tCdE8jZyiI\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808792,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Calculate
the product of 25 and 17.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
270,\n \"completion_tokens\": 40,\n \"total_tokens\": 310,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:13 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '977'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"be0426a2-3de6-44ce-b7de-22b5992b5025","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"2974dcee-5712-40df-aa41-4877f36c4749","parts":[{"kind":"text","text":"Calculate
the product of 25 and 17."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '362'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"be0426a2-3de6-44ce-b7de-22b5992b5025\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"f9311c55-c6ef-413a-818b-d0ebeb8466be\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"fd2d5eee-8bd6-4ead-b96b-917782444d10\"}}\r\n\r\ndata:
{\"id\":\"be0426a2-3de6-44ce-b7de-22b5992b5025\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"f9311c55-c6ef-413a-818b-d0ebeb8466be\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"fd2d5eee-8bd6-4ead-b96b-917782444d10\"}}\r\n\r\ndata:
{\"id\":\"be0426a2-3de6-44ce-b7de-22b5992b5025\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"f9311c55-c6ef-413a-818b-d0ebeb8466be\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"a27f8c49-b5d4-4421-b7cf-9b7455273f34\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
calculator] 25 * 17 = 425\\nThe product of 25 and 17 is 425.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"fd2d5eee-8bd6-4ead-b96b-917782444d10\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:33:13 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Ask the remote A2A agent to calculate 25 times 17.\n\nProvide your complete
response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1361'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qP6z5b62nfY2HkvSobGqRaK0JLD\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808796,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Calculate
25 times 17.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 270,\n \"completion_tokens\":
37,\n \"total_tokens\": 307,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:17 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '997'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,669 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the remote A2A agent to explain quantum computing in simple
terms.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1389'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQLp4DYPeaKABFeQVDKxePblbY7\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808873,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Can
you explain quantum computing in simple terms, suitable for someone with no
background in the subject?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
271,\n \"completion_tokens\": 50,\n \"total_tokens\": 321,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:34 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '834'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"4f421e0d-c7ee-46ad-8c7b-9ff31404beba","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"4b0f91a5-079c-4efe-ba3d-bee5592c67e5","parts":[{"kind":"text","text":"Can
you explain quantum computing in simple terms, suitable for someone with no
background in the subject?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '433'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"4f421e0d-c7ee-46ad-8c7b-9ff31404beba\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cdfd9f23-8f2e-4cea-8839-cd52ce0584a7\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"7c223d0c-f144-4bf8-b8d8-1c37efb04232\"}}\r\n\r\ndata:
{\"id\":\"4f421e0d-c7ee-46ad-8c7b-9ff31404beba\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cdfd9f23-8f2e-4cea-8839-cd52ce0584a7\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"7c223d0c-f144-4bf8-b8d8-1c37efb04232\"}}\r\n\r\ndata:
{\"id\":\"4f421e0d-c7ee-46ad-8c7b-9ff31404beba\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cdfd9f23-8f2e-4cea-8839-cd52ce0584a7\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"bdeb2af5-89c3-4368-93bd-f263b3ab40a9\",\"parts\":[{\"kind\":\"text\",\"text\":\"Quantum
computing is a new type of computing that uses the principles of quantum mechanics,
which is the science that explains how very small particles, like atoms and
electrons, behave. \\n\\nIn classical computing, the basic unit of information
is a bit, which can be either a 0 or a 1. This is like a light switch that
can either be off (0) or on (1). \\n\\nQuantum computing, on the other hand,
uses quantum bits or qubits. A qubit can be in a state of 0, 1, or both at
the same time due to a phenomenon called superposition. You can think of this
as a spinning coin that is both heads and tails while in the air, rather than
fixed to one side when it lands. \\n\\nMoreover, qubits can be linked together
through another phenomenon called entanglement. This means that the state
of one qubit can depend on the state of another, no matter how far apart they
are. This allows quantum computers to solve certain problems much more efficiently
than classical computers. \\n\\nIn summary, quantum computing harnesses the
strange and fascinating behaviors of particles at the quantum level to process
information in ways that traditional computers cannot, potentially making
them much more powerful for specific tasks, like breaking encryption or simulating
complex molecules.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"7c223d0c-f144-4bf8-b8d8-1c37efb04232\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:33 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the remote A2A agent to explain quantum computing in simple
terms.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1389'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQWQTMdXD4Ug72rmgl1eWkm40zL\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808884,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Please
explain quantum computing in simple terms suitable for a general audience.\\\",\\\"is_a2a\\\":true}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
271,\n \"completion_tokens\": 43,\n \"total_tokens\": 314,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:45 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '714'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"f5ff9e1d-598f-4b68-94ed-e7f2b942c467","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"b4d6f61a-c854-487a-b238-25f03e0a5ef7","parts":[{"kind":"text","text":"Please
explain quantum computing in simple terms suitable for a general audience."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '408'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"f5ff9e1d-598f-4b68-94ed-e7f2b942c467\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"93c80ac2-eaef-4eeb-af7a-5aaa15f5b9ad\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"a4288a18-56d4-4270-ad4a-da1efe6cf84b\"}}\r\n\r\ndata:
{\"id\":\"f5ff9e1d-598f-4b68-94ed-e7f2b942c467\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"93c80ac2-eaef-4eeb-af7a-5aaa15f5b9ad\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"a4288a18-56d4-4270-ad4a-da1efe6cf84b\"}}\r\n\r\ndata:
{\"id\":\"f5ff9e1d-598f-4b68-94ed-e7f2b942c467\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"93c80ac2-eaef-4eeb-af7a-5aaa15f5b9ad\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"c7303248-eebb-48a4-be78-791ac6d11083\",\"parts\":[{\"kind\":\"text\",\"text\":\"Quantum
computing is a type of computing that uses the principles of quantum mechanics,
which is the science that explains how very small particles, like atoms and
photons, behave. Unlike traditional computers that use bits as the smallest
unit of data (which can be either 0 or 1), quantum computers use quantum bits
or qubits. \\n\\nHere\u2019s a simple breakdown:\\n- **Qubits**: A qubit can
be in a state of 0, 1, or both at the same time (thanks to a property called
superposition). This allows quantum computers to process a vast amount of
possibilities simultaneously.\\n- **Entanglement**: Qubits can also be entangled,
meaning the state of one qubit can depend on the state of another, no matter
how far apart they are. This creates a powerful link that can improve computing
efficiency.\\n- **Quantum Speedup**: Because of superposition and entanglement,
quantum computers can solve certain complex problems much faster than traditional
computers.\\n\\nIn simple terms, you can think of quantum computing as a new
way of doing math and problem-solving that can tackle really difficult tasks
much faster than the computers we use today. However, quantum computing is
still in its early stages and is not yet widely used for everyday tasks.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"a4288a18-56d4-4270-ad4a-da1efe6cf84b\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:44 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the remote A2A agent to explain quantum computing in simple
terms.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1389'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQhcQzT9KEetchwHOgQTpBYRw7w\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808895,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Please
explain quantum computing in simple terms suitable for someone without a technical
background.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 271,\n \"completion_tokens\":
45,\n \"total_tokens\": 316,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:55 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '786'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"10814663-4e33-4777-ac5d-ea9098a690e9","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"a112c8d7-8cf8-47bc-b84d-a7c8d34326d8","parts":[{"kind":"text","text":"Please
explain quantum computing in simple terms suitable for someone without a technical
background."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '428'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"10814663-4e33-4777-ac5d-ea9098a690e9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"754dce7b-b2d5-426e-aaf8-3b5aa886984a\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"2745bfc5-138f-4773-ad96-98d463fcbb3e\"}}\r\n\r\ndata:
{\"id\":\"10814663-4e33-4777-ac5d-ea9098a690e9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"754dce7b-b2d5-426e-aaf8-3b5aa886984a\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"2745bfc5-138f-4773-ad96-98d463fcbb3e\"}}\r\n\r\ndata:
{\"id\":\"10814663-4e33-4777-ac5d-ea9098a690e9\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"754dce7b-b2d5-426e-aaf8-3b5aa886984a\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"5cfcafcb-5770-4c5a-9724-54dd475a12a1\",\"parts\":[{\"kind\":\"text\",\"text\":\"Quantum
computing is a type of computing that uses the principles of quantum mechanics,
which is the science that explains how very small particles, like atoms and
photons, behave. \\n\\nIn traditional computers, information is processed
using bits, which can be either 0 or 1. Think of bits like light switches
that can be turned off (0) or on (1). \\n\\nQuantum computers, on the other
hand, use quantum bits, or qubits. A qubit can be 0, 1, or both at the same
time due to a property called superposition. This means that quantum computers
can process a vast amount of information at once compared to regular computers.
\\n\\nAnother important concept in quantum computing is entanglement, where
qubits become linked in such a way that the state of one qubit can depend
on the state of another, no matter how far apart they are. This can lead to
faster processing speeds and the ability to solve complex problems more efficiently.
\\n\\nIn summary, quantum computing is like a supercharged version of traditional
computing that can perform many calculations simultaneously by taking advantage
of the quirks of quantum physics.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"2745bfc5-138f-4773-ad96-98d463fcbb3e\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:54 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the remote A2A agent to explain quantum computing in simple
terms.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1389'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQqDYsO5v1fvanhv4G5kXB0YHwW\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808904,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Please
explain quantum computing in simple terms.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
271,\n \"completion_tokens\": 38,\n \"total_tokens\": 309,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:35:05 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '918'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,216 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher\nYour personal goal is: Find information"},{"role":"user","content":"\nCurrent
Task: What is 2 + 2?\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '246'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D8WiGEDTbwLcrRjnvxgSpt9XISVwN\",\n \"object\":
\"chat.completion\",\n \"created\": 1770924744,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The sum of 2 + 2 is 4.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
43,\n \"completion_tokens\": 12,\n \"total_tokens\": 55,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 12 Feb 2026 19:32:25 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '988'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher\nYour personal goal is: Find information"},{"role":"user","content":"\nCurrent
Task: What is 2 + 2?\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '246'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D8WiHquzE7A8dBalX3phbPaOSXEnQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1770924745,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The sum of 2 + 2 is 4.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
43,\n \"completion_tokens\": 12,\n \"total_tokens\": 55,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 12 Feb 2026 19:32:26 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '415'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,623 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the A2A agent to find the current time in Tokyo.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1371'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQ7mKtA8Q7mQ8dWafrjQAtMBV7G\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808859,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time in Tokyo?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
270,\n \"completion_tokens\": 39,\n \"total_tokens\": 309,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:20 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '1100'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"ef23c1ef-ef7e-4422-8fd0-330f074e5de8","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"f5952732-b12e-40f9-a9e9-9779501d6467","parts":[{"kind":"text","text":"What
is the current time in Tokyo?"}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '361'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"ef23c1ef-ef7e-4422-8fd0-330f074e5de8\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"913b5bd8-c755-4e0c-bbd0-fcf11fcdf257\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"eb05446a-6d96-4964-8e94-3f4ffcc612a7\"}}\r\n\r\ndata:
{\"id\":\"ef23c1ef-ef7e-4422-8fd0-330f074e5de8\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"913b5bd8-c755-4e0c-bbd0-fcf11fcdf257\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"eb05446a-6d96-4964-8e94-3f4ffcc612a7\"}}\r\n\r\ndata:
{\"id\":\"ef23c1ef-ef7e-4422-8fd0-330f074e5de8\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"913b5bd8-c755-4e0c-bbd0-fcf11fcdf257\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"41813b78-0e9a-41a4-bb6c-7176f9d4e5b2\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-31 06:34:21 JST (Asia/Tokyo)\\nThe current time in Tokyo
is 06:34:21 JST on January 31, 2026.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"eb05446a-6d96-4964-8e94-3f4ffcc612a7\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:20 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the A2A agent to find the current time in Tokyo.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1371'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQBrUkvfrIi9SeEtGbWxFJjum3o\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808863,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Please
find the current time in Tokyo and provide it.\\\",\\\"is_a2a\\\":true}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
270,\n \"completion_tokens\": 41,\n \"total_tokens\": 311,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:24 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '966'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"dc04364b-9ad9-4ba4-81a8-bae8a71bee8c","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"781be5cc-2c77-4c24-a0af-c9d30609ecd7","parts":[{"kind":"text","text":"Please
find the current time in Tokyo and provide it."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '380'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"dc04364b-9ad9-4ba4-81a8-bae8a71bee8c\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ae6fa54d-f6b0-4c15-b8b0-eb310731bc1a\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"0bda253d-b860-4bd4-9129-691b886e4f8b\"}}\r\n\r\ndata:
{\"id\":\"dc04364b-9ad9-4ba4-81a8-bae8a71bee8c\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ae6fa54d-f6b0-4c15-b8b0-eb310731bc1a\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"0bda253d-b860-4bd4-9129-691b886e4f8b\"}}\r\n\r\ndata:
{\"id\":\"dc04364b-9ad9-4ba4-81a8-bae8a71bee8c\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"ae6fa54d-f6b0-4c15-b8b0-eb310731bc1a\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"82eec93e-3c91-44c8-9acb-343454eb8fb8\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-31 06:34:25 JST (Asia/Tokyo)\\nThe current time in Tokyo
is 06:34 AM on January 31, 2026.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"0bda253d-b860-4bd4-9129-691b886e4f8b\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:24 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the A2A agent to find the current time in Tokyo.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1371'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQGaNP21E5XkyFMtOtKogiqwKnN\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808868,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time in Tokyo? Please provide the current local time there.\\\",\\\"is_a2a\\\":true}\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
270,\n \"completion_tokens\": 46,\n \"total_tokens\": 316,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:29 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '786'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"06a22a9f-0969-414f-a17b-e3e9d4a3bbef","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"c2ab08c5-ffb1-43e9-9f43-6e8adbaba95b","parts":[{"kind":"text","text":"What
is the current time in Tokyo? Please provide the current local time there."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '406'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"06a22a9f-0969-414f-a17b-e3e9d4a3bbef\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"f86582e4-7d3f-448e-ab51-3522667da9a0\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"5b92d2cb-100e-4be5-ba4d-39b320c3dd4f\"}}\r\n\r\ndata:
{\"id\":\"06a22a9f-0969-414f-a17b-e3e9d4a3bbef\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"f86582e4-7d3f-448e-ab51-3522667da9a0\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"5b92d2cb-100e-4be5-ba4d-39b320c3dd4f\"}}\r\n\r\ndata:
{\"id\":\"06a22a9f-0969-414f-a17b-e3e9d4a3bbef\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"f86582e4-7d3f-448e-ab51-3522667da9a0\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"befc6d8b-3618-410b-910d-614dc6de77fd\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
get_time] 2026-01-31 06:34:30 JST (Asia/Tokyo)\\nThe current local time in
Tokyo is 06:34 AM on January 31, 2026.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"5b92d2cb-100e-4be5-ba4d-39b320c3dd4f\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:34:28 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote specialized agents\nYour personal goal is:
Find and analyze information about AI developments"},{"role":"user","content":"\nCurrent
Task: Delegate to the A2A agent to find the current time in Tokyo.\n\nProvide
your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1371'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qQKvrKehCGN5jjCKyRqyN6g88gg\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808872,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"What
is the current time in Tokyo?\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
270,\n \"completion_tokens\": 39,\n \"total_tokens\": 309,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:34:32 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '742'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,216 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Simple Assistant. A helpful
assistant\nYour personal goal is: Help with basic tasks"},{"role":"user","content":"\nCurrent
Task: Say hello\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '248'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D8WiFd3X8iE0Xk2N1S3L2k798qWFq\",\n \"object\":
\"chat.completion\",\n \"created\": 1770924743,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! How can I assist you today?\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
41,\n \"completion_tokens\": 9,\n \"total_tokens\": 50,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 12 Feb 2026 19:32:23 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '346'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Simple Assistant. A helpful
assistant\nYour personal goal is: Help with basic tasks"},{"role":"user","content":"\nCurrent
Task: Say hello\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '248'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D8WiFOaYAAKsuxLAXe6PwTk5AjYdk\",\n \"object\":
\"chat.completion\",\n \"created\": 1770924743,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! How can I assist you today?\",\n
\ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
41,\n \"completion_tokens\": 9,\n \"total_tokens\": 50,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 12 Feb 2026 19:32:24 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '618'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,220 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote agents\nYour personal goal is: Find and analyze
information"},{"role":"user","content":"\nCurrent Task: Use the remote A2A agent
to calculate 10 plus 15.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '322'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D8WiD3djMj91vXlZgRexuoagt4YjK\",\n \"object\":
\"chat.completion\",\n \"created\": 1770924741,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I am using the remote A2A agent to
calculate 10 plus 15.\\n\\nCalculation result: 10 + 15 = 25\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
57,\n \"completion_tokens\": 28,\n \"total_tokens\": 85,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 12 Feb 2026 19:32:21 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '633'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote agents\nYour personal goal is: Find and analyze
information"},{"role":"user","content":"\nCurrent Task: Use the remote A2A agent
to calculate 10 plus 15.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '322'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D8WiEa5fOdnyGxf1o0YYZRjEVstUX\",\n \"object\":
\"chat.completion\",\n \"created\": 1770924742,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Using the remote A2A agent to calculate
10 plus 15:\\n\\n10 + 15 = 25\\n\\nThe result is 25.\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
57,\n \"completion_tokens\": 29,\n \"total_tokens\": 86,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_75546bd1a7\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Thu, 12 Feb 2026 19:32:22 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '581'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
set-cookie:
- SET-COOKIE-XXX
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1

View File

@@ -1,616 +0,0 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote agents\nYour personal goal is: Find and analyze
information"},{"role":"user","content":"\nCurrent Task: Ask the A2A agent to
calculate 100 divided by 4.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1325'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qOhyHELb5GreUumlAiVahTNIN2R\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808771,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Calculate
100 divided by 4, please.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
266,\n \"completion_tokens\": 40,\n \"total_tokens\": 306,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:32:52 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '685'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"4d72e53a-2c40-42cb-b74a-404a5a798ba6","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"fa5e002d-f81b-4b61-84e6-27f40d0e0240","parts":[{"kind":"text","text":"Calculate
100 divided by 4, please."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '362'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"4d72e53a-2c40-42cb-b74a-404a5a798ba6\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"91d26e2b-6c66-45ce-9356-74a0eb634c28\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"151869b9-f640-454a-865d-405413a0859d\"}}\r\n\r\ndata:
{\"id\":\"4d72e53a-2c40-42cb-b74a-404a5a798ba6\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"91d26e2b-6c66-45ce-9356-74a0eb634c28\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"151869b9-f640-454a-865d-405413a0859d\"}}\r\n\r\ndata:
{\"id\":\"4d72e53a-2c40-42cb-b74a-404a5a798ba6\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"91d26e2b-6c66-45ce-9356-74a0eb634c28\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"3bf24e8a-6a3b-45f1-82eb-7a283a89e0ac\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
calculator] 100 / 4 = 25.0\\nThe result of 100 divided by 4 is 25.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"151869b9-f640-454a-865d-405413a0859d\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:32:51 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote agents\nYour personal goal is: Find and analyze
information"},{"role":"user","content":"\nCurrent Task: Ask the A2A agent to
calculate 100 divided by 4.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1325'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qOmCdZD7rL5Q1syh0ag6AuH5bw3\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808776,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Calculate
100 divided by 4.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 266,\n \"completion_tokens\":
38,\n \"total_tokens\": 304,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:32:57 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '680'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"4ea0d213-a2cd-4d10-9b8c-034cfaa1d678","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"22c5127f-e6d8-4aae-852c-d2d131474e38","parts":[{"kind":"text","text":"Calculate
100 divided by 4."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '354'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"4ea0d213-a2cd-4d10-9b8c-034cfaa1d678\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cba0ab67-9cc6-4afc-a15a-009b0abe4a1c\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"50c44a89-d6bd-4272-92d0-9aef38b35c93\"}}\r\n\r\ndata:
{\"id\":\"4ea0d213-a2cd-4d10-9b8c-034cfaa1d678\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cba0ab67-9cc6-4afc-a15a-009b0abe4a1c\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"50c44a89-d6bd-4272-92d0-9aef38b35c93\"}}\r\n\r\ndata:
{\"id\":\"4ea0d213-a2cd-4d10-9b8c-034cfaa1d678\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"cba0ab67-9cc6-4afc-a15a-009b0abe4a1c\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"1d96bb0a-a3b7-4217-a1bc-bdb2658f14b7\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
calculator] 100 / 4 = 25.0\\n100 divided by 4 is 25.0.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"50c44a89-d6bd-4272-92d0-9aef38b35c93\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:32:56 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote agents\nYour personal goal is: Find and analyze
information"},{"role":"user","content":"\nCurrent Task: Ask the A2A agent to
calculate 100 divided by 4.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1325'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qOqMBxYghf1iunoo7hIo23Mmyw0\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808780,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Calculate
100 divided by 4.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\": null,\n
\ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 266,\n \"completion_tokens\":
38,\n \"total_tokens\": 304,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:00 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '572'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"id":"217af3be-d6a6-48df-9460-f254481f7da6","jsonrpc":"2.0","method":"message/stream","params":{"configuration":{"acceptedOutputModes":["application/json"],"blocking":true},"message":{"kind":"message","messageId":"2886d1ee-0fc0-4143-98d8-e7a75ade6895","parts":[{"kind":"text","text":"Calculate
100 divided by 4."}],"referenceTaskIds":[],"role":"user"}}}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- '*/*, text/event-stream'
accept-encoding:
- ACCEPT-ENCODING-XXX
cache-control:
- no-store
connection:
- keep-alive
content-length:
- '354'
content-type:
- application/json
host:
- localhost:9999
method: POST
uri: http://localhost:9999
response:
body:
string: "data: {\"id\":\"217af3be-d6a6-48df-9460-f254481f7da6\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"a3254fcf-baf7-4f46-9767-60156d837a6e\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"submitted\"},\"taskId\":\"2c0e3b76-60f7-4636-934a-2ec41af75ead\"}}\r\n\r\ndata:
{\"id\":\"217af3be-d6a6-48df-9460-f254481f7da6\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"a3254fcf-baf7-4f46-9767-60156d837a6e\",\"final\":false,\"kind\":\"status-update\",\"status\":{\"state\":\"working\"},\"taskId\":\"2c0e3b76-60f7-4636-934a-2ec41af75ead\"}}\r\n\r\ndata:
{\"id\":\"217af3be-d6a6-48df-9460-f254481f7da6\",\"jsonrpc\":\"2.0\",\"result\":{\"contextId\":\"a3254fcf-baf7-4f46-9767-60156d837a6e\",\"final\":true,\"kind\":\"status-update\",\"status\":{\"message\":{\"kind\":\"message\",\"messageId\":\"b32f6b9c-70b5-4152-9df3-19436e3b655d\",\"parts\":[{\"kind\":\"text\",\"text\":\"[Tool:
calculator] 100 / 4 = 25.0\\n100 divided by 4 equals 25.0.\"}],\"role\":\"agent\"},\"state\":\"completed\"},\"taskId\":\"2c0e3b76-60f7-4636-934a-2ec41af75ead\"}}\r\n\r\n"
headers:
cache-control:
- no-store
connection:
- keep-alive
content-type:
- text/event-stream; charset=utf-8
date:
- Fri, 30 Jan 2026 21:32:59 GMT
server:
- uvicorn
transfer-encoding:
- chunked
x-accel-buffering:
- 'no'
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Analyst. Expert
researcher with access to remote agents\nYour personal goal is: Find and analyze
information"},{"role":"user","content":"\nCurrent Task: Ask the A2A agent to
calculate 100 divided by 4.\n\nProvide your complete response:"}],"model":"gpt-4.1-mini","response_format":{"type":"json_schema","json_schema":{"schema":{"properties":{"a2a_ids":{"description":"A2A
agent IDs to delegate to.","items":{"const":"http://localhost:9999/.well-known/agent-card.json","type":"string"},"maxItems":1,"title":"A2A
Ids","type":"array"},"message":{"description":"The message content. If is_a2a=true,
this is sent to the A2A agent. If is_a2a=false, this is your final answer ending
the conversation.","title":"Message","type":"string"},"is_a2a":{"description":"Set
to false when the remote agent has answered your question - extract their answer
and return it as your final message. Set to true ONLY if you need to ask a NEW,
DIFFERENT question. NEVER repeat the same request - if the conversation history
shows the agent already answered, set is_a2a=false immediately.","title":"Is
A2A","type":"boolean"}},"required":["a2a_ids","message","is_a2a"],"title":"AgentResponse","type":"object","additionalProperties":false},"name":"AgentResponse","strict":true}},"stream":false}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '1325'
content-type:
- application/json
cookie:
- COOKIE-XXX
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-helper-method:
- beta.chat.completions.parse
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D3qOu8lITQt6WBCLcm6bvduIC2xP0\",\n \"object\":
\"chat.completion\",\n \"created\": 1769808784,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"{\\\"a2a_ids\\\":[\\\"http://localhost:9999/.well-known/agent-card.json\\\"],\\\"message\\\":\\\"Please
calculate 100 divided by 4.\\\",\\\"is_a2a\\\":true}\",\n \"refusal\":
null,\n \"annotations\": []\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
266,\n \"completion_tokens\": 39,\n \"total_tokens\": 305,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_e01c6f58e1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Fri, 30 Jan 2026 21:33:04 GMT
Server:
- cloudflare
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '934'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1