Gl/feat/a2a refactor (#3793)

* feat: agent metaclass, refactor a2a to wrappers

* feat: a2a schemas and utils

* chore: move agent class, update imports

* refactor: organize imports to avoid circularity, add a2a to console

* feat: pass response_model through call chain

* feat: add standard openapi spec serialization to tools and structured output

* feat: a2a events

* chore: add a2a to pyproject

* docs: minimal base for learn docs

* fix: adjust a2a conversation flow, allow llm to decide exit until max_retries

* fix: inject agent skills into initial prompt

* fix: format agent card as json in prompt

* refactor: simplify A2A agent prompt formatting and improve skill display

* chore: wide cleanup

* chore: cleanup logic, add auth cache, use json for messages in prompt

* chore: update docs

* fix: doc snippets formatting

* feat: optimize A2A agent card fetching and improve error reporting

* chore: move imports to top of file

* chore: refactor hasattr check

* chore: add httpx-auth, update lockfile

* feat: create base public api

* chore: cleanup modules, add docstrings, types

* fix: exclude extra fields in prompt

* chore: update docs

* tests: update to correct import

* chore: lint for ruff, add missing import

* fix: tweak openai streaming logic for response model

* tests: add reimport for test

* tests: add reimport for test

* fix: don't set a2a attr if not set

* fix: don't set a2a attr if not set

* chore: update cassettes

* tests: fix tests

* fix: use instructor and don't pass response_format for litellm

* chore: consolidate event listeners, add typing

* fix: address race condition in test, update cassettes

* tests: add correct mocks, rerun cassette for json

* tests: update cassette

* chore: regenerate cassette after new run

* fix: make token manager access-safe

* fix: make token manager access-safe

* merge

* chore: update test and cassette for output pydantic

* fix: tweak to disallow deadlock

* chore: linter

* fix: adjust event ordering for threading

* fix: use conditional for batch check

* tests: tweak for emission

* tests: simplify api + event check

* fix: ensure non-function calling llms see json formatted string

* tests: tweak message comparison

* fix: use internal instructor for litellm structure responses

---------

Co-authored-by: Mike Plachta <mike@crewai.com>
This commit is contained in:
Greyson LaLonde
2025-11-01 02:42:03 +01:00
committed by GitHub
parent e229ef4e19
commit e134e5305b
71 changed files with 9790 additions and 4592 deletions

View File

@@ -1,10 +1,10 @@
from typing import Any
import pytest
from crewai.agent import BaseAgent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.tools.base_tool import BaseTool
from crewai.utilities.token_counter_callback import TokenProcess
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from pydantic import BaseModel

View File

@@ -1342,7 +1342,7 @@ def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject
assert "Trigger Payload: Context data" in second_prompt
@patch("crewai.agent.CrewTrainingHandler")
@patch("crewai.agent.core.CrewTrainingHandler")
def test_agent_training_handler(crew_training_handler):
task_prompt = "What is 1 + 1?"
agent = Agent(
@@ -1351,7 +1351,7 @@ def test_agent_training_handler(crew_training_handler):
backstory="test backstory",
verbose=True,
)
crew_training_handler().load.return_value = {
crew_training_handler.return_value.load.return_value = {
f"{agent.id!s}": {"0": {"human_feedback": "good"}}
}
@@ -1360,11 +1360,11 @@ def test_agent_training_handler(crew_training_handler):
assert result == "What is 1 + 1?\n\nYou MUST follow these instructions: \n good"
crew_training_handler.assert_has_calls(
[mock.call(), mock.call("training_data.pkl"), mock.call().load()]
[mock.call("training_data.pkl"), mock.call().load()]
)
@patch("crewai.agent.CrewTrainingHandler")
@patch("crewai.agent.core.CrewTrainingHandler")
def test_agent_use_trained_data(crew_training_handler):
task_prompt = "What is 1 + 1?"
agent = Agent(
@@ -1373,7 +1373,7 @@ def test_agent_use_trained_data(crew_training_handler):
backstory="test backstory",
verbose=True,
)
crew_training_handler().load.return_value = {
crew_training_handler.return_value.load.return_value = {
agent.role: {
"suggestions": [
"The result of the math operation must be right.",
@@ -1389,7 +1389,7 @@ def test_agent_use_trained_data(crew_training_handler):
" - The result of the math operation must be right.\n - Result must be better than 1."
)
crew_training_handler.assert_has_calls(
[mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()]
[mock.call("trained_agents_data.pkl"), mock.call().load()]
)

View File

@@ -0,0 +1,111 @@
"""Test A2A wrapper is only applied when a2a is passed to Agent."""
from unittest.mock import patch
import pytest
from crewai import Agent
from crewai.a2a.config import A2AConfig
try:
import a2a # noqa: F401
A2A_SDK_INSTALLED = True
except ImportError:
A2A_SDK_INSTALLED = False
def test_agent_without_a2a_has_no_wrapper():
    """A plain agent (no ``a2a`` argument) must keep an unwrapped execute_task."""
    plain_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    # No config was supplied, so the attribute stays None and the method is callable as-is.
    assert plain_agent.a2a is None
    assert callable(plain_agent.execute_task)
@pytest.mark.skipif(
    True,
    reason="Requires a2a-sdk to be installed. This test verifies wrapper is applied when a2a is set.",
)
def test_agent_with_a2a_has_wrapper():
    """An agent built with an ``a2a`` config should expose that config unchanged.

    NOTE(review): the hard-coded ``skipif(True)`` means this test never runs.
    ``test_agent_with_a2a_creates_successfully`` exercises the same path behind
    the real ``A2A_SDK_INSTALLED`` flag (and expects a trailing-slash endpoint) —
    confirm whether this always-skipped duplicate is still wanted.
    """
    config = A2AConfig(
        endpoint="http://test-endpoint.com",
    )
    wrapped_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        a2a=config,
    )
    assert wrapped_agent.a2a is not None
    assert wrapped_agent.a2a.endpoint == "http://test-endpoint.com"
    assert callable(wrapped_agent.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_agent_with_a2a_creates_successfully():
    """With the SDK present, an a2a-configured agent is built and wrapped."""
    cfg = A2AConfig(
        endpoint="http://test-endpoint.com",
    )
    a2a_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        a2a=cfg,
    )
    assert a2a_agent.a2a is not None
    # Endpoint comes back with a trailing slash — presumably A2AConfig
    # normalizes URLs on construction; confirm against the config model.
    assert a2a_agent.a2a.endpoint == "http://test-endpoint.com/"
    assert callable(a2a_agent.execute_task)
    # A functools.wraps-style wrapper leaves a __wrapped__ attribute behind.
    assert hasattr(a2a_agent.execute_task, "__wrapped__")
def test_multiple_agents_without_a2a():
    """Several plain agents can coexist; none of them gains the a2a wrapper."""
    first = Agent(
        role="agent 1",
        goal="test goal",
        backstory="test backstory",
    )
    second = Agent(
        role="agent 2",
        goal="test goal",
        backstory="test backstory",
    )
    # Both instances should be untouched: no config, plain callable method.
    for instance in (first, second):
        assert instance.a2a is None
        assert callable(instance.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_wrapper_is_applied_differently_per_instance():
    """Wrapping is per-instance: only the a2a-configured agent gets wrapped."""
    plain = Agent(
        role="agent without a2a",
        goal="test goal",
        backstory="test backstory",
    )
    wrapped = Agent(
        role="agent with a2a",
        goal="test goal",
        backstory="test backstory",
        a2a=A2AConfig(endpoint="http://test-endpoint.com"),
    )
    # The two bound methods must be backed by different underlying functions...
    assert plain.execute_task.__func__ is not wrapped.execute_task.__func__
    # ...and only the configured agent carries the __wrapped__ marker.
    assert not hasattr(plain.execute_task, "__wrapped__")
    assert hasattr(wrapped.execute_task, "__wrapped__")

View File

@@ -103,7 +103,7 @@ def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
super().__init__(**kwargs)
# Patch the LiteAgent class
monkeypatch.setattr("crewai.agent.LiteAgent", MockLiteAgent)
monkeypatch.setattr("crewai.agent.core.LiteAgent", MockLiteAgent)
# Call kickoff to create the LiteAgent
agent.kickoff("Test query")
@@ -123,8 +123,6 @@ def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
assert created_lite_agent["response_format"] is None
# Test with a response_format
monkeypatch.setattr("crewai.agent.LiteAgent", MockLiteAgent)
class TestResponse(BaseModel):
test_field: str
@@ -527,6 +525,7 @@ def test_lite_agent_with_custom_llm_and_guardrails():
available_functions=None,
from_task=None,
from_agent=None,
response_model=None,
) -> str:
self.call_count += 1