Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-16 03:28:30 +00:00
Merge branch 'main' of github.com:crewAIInc/crewAI into lorenze/enh-decouple-executor-from-crew
@@ -5,7 +5,7 @@ This module is separate from experimental.a2a to avoid circular imports.
 from __future__ import annotations

-from typing import Annotated, Any, ClassVar
+from typing import Annotated, Any, ClassVar, Literal

 from pydantic import (
     BaseModel,
@@ -53,6 +53,7 @@ class A2AConfig(BaseModel):
         fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
         trust_remote_completion_status: If True, return A2A agent's result directly when completed.
         updates: Update mechanism config.
+        transport_protocol: A2A transport protocol (grpc, jsonrpc, http+json).
     """

     model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
@@ -82,3 +83,7 @@ class A2AConfig(BaseModel):
         default_factory=_get_default_update_config,
         description="Update mechanism config",
     )
+    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"] = Field(
+        default="JSONRPC",
+        description="Specified mode of A2A transport protocol",
+    )
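For orientation, a minimal sketch of how the new field might be set. Only fail_fast and transport_protocol are taken from the diff above; the import path and any other required fields of A2AConfig (such as the remote agent's endpoint) are assumptions.

# Illustrative only: import path and omitted fields are assumed, not from the diff.
from crewai.experimental.a2a import A2AConfig  # hypothetical import path

config = A2AConfig(
    fail_fast=False,              # documented above: skip unreachable agents instead of raising
    transport_protocol="GRPC",    # new field; defaults to "JSONRPC"
)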
@@ -7,7 +7,7 @@ from collections.abc import AsyncIterator, MutableMapping
 from contextlib import asynccontextmanager
 from functools import lru_cache
 import time
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Literal
 import uuid

 from a2a.client import A2AClientHTTPError, Client, ClientConfig, ClientFactory
@@ -18,7 +18,6 @@ from a2a.types import (
     PushNotificationConfig as A2APushNotificationConfig,
     Role,
     TextPart,
-    TransportProtocol,
 )
 from aiocache import cached  # type: ignore[import-untyped]
 from aiocache.serializers import PickleSerializer  # type: ignore[import-untyped]
@@ -259,6 +258,7 @@ async def _afetch_agent_card_impl(

 def execute_a2a_delegation(
     endpoint: str,
+    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
     auth: AuthScheme | None,
     timeout: int,
     task_description: str,
@@ -282,6 +282,23 @@ def execute_a2a_delegation(
         use aexecute_a2a_delegation directly.

     Args:
+        endpoint: A2A agent endpoint URL (AgentCard URL)
+        transport_protocol: Optional A2A transport protocol (grpc, jsonrpc, http+json)
+        auth: Optional AuthScheme for authentication (Bearer, OAuth2, API Key, HTTP Basic/Digest)
+        timeout: Request timeout in seconds
+        task_description: The task to delegate
+        context: Optional context information
+        context_id: Context ID for correlating messages/tasks
+        task_id: Specific task identifier
+        reference_task_ids: List of related task IDs
+        metadata: Additional metadata (external_id, request_id, etc.)
+        extensions: Protocol extensions for custom fields
+        conversation_history: Previous Message objects from conversation
+        agent_id: Agent identifier for logging
+        agent_role: Role of the CrewAI agent delegating the task
+        agent_branch: Optional agent tree branch for logging
+        response_model: Optional Pydantic model for structured outputs
+        turn_number: Optional turn number for multi-turn conversations
-        endpoint: A2A agent endpoint URL.
-        auth: Optional AuthScheme for authentication.
-        timeout: Request timeout in seconds.
@@ -323,6 +340,7 @@ def execute_a2a_delegation(
         agent_role=agent_role,
         agent_branch=agent_branch,
         response_model=response_model,
+        transport_protocol=transport_protocol,
         turn_number=turn_number,
         updates=updates,
     )
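As a rough usage sketch (not part of the diff), the new argument slots in alongside the existing ones. Endpoint, timeout, and identifier values below are placeholders; only the keyword names come from the signature and docstring above, and other parameters may be required.

result = execute_a2a_delegation(
    endpoint="https://agents.example.com/.well-known/agent-card.json",  # placeholder AgentCard URL
    transport_protocol="JSONRPC",   # or "GRPC" / "HTTP+JSON"
    auth=None,
    timeout=60,
    task_description="Summarize the attached quarterly report",
    agent_id="remote-summarizer",   # used for logging
    agent_role="Delegating analyst",
)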
@@ -333,6 +351,7 @@ def execute_a2a_delegation(

 async def aexecute_a2a_delegation(
     endpoint: str,
+    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
     auth: AuthScheme | None,
     timeout: int,
     task_description: str,
@@ -356,6 +375,23 @@ async def aexecute_a2a_delegation(
     in an async context (e.g., with Crew.akickoff() or agent.aexecute_task()).

     Args:
+        endpoint: A2A agent endpoint URL
+        transport_protocol: Optional A2A transport protocol (grpc, jsonrpc, http+json)
+        auth: Optional AuthScheme for authentication
+        timeout: Request timeout in seconds
+        task_description: Task to delegate
+        context: Optional context
+        context_id: Context ID for correlation
+        task_id: Specific task identifier
+        reference_task_ids: Related task IDs
+        metadata: Additional metadata
+        extensions: Protocol extensions
+        conversation_history: Previous Message objects
+        turn_number: Current turn number
+        agent_branch: Agent tree branch for logging
+        agent_id: Agent identifier for logging
+        agent_role: Agent role for logging
+        response_model: Optional Pydantic model for structured outputs
-        endpoint: A2A agent endpoint URL.
-        auth: Optional AuthScheme for authentication.
-        timeout: Request timeout in seconds.
@@ -414,6 +450,7 @@ async def aexecute_a2a_delegation(
         agent_role=agent_role,
         response_model=response_model,
         updates=updates,
+        transport_protocol=transport_protocol,
     )

     crewai_event_bus.emit(
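A corresponding async sketch for code that already runs inside an event loop; the same hedges apply, and all values are placeholders.

import asyncio

async def delegate() -> None:
    result = await aexecute_a2a_delegation(
        endpoint="https://agents.example.com/.well-known/agent-card.json",  # placeholder
        transport_protocol="GRPC",
        auth=None,
        timeout=60,
        task_description="Summarize the attached quarterly report",
    )
    print(result)

asyncio.run(delegate())  # for demonstration; inside a crew this already runs in a loop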
@@ -431,6 +468,7 @@ async def aexecute_a2a_delegation(

 async def _aexecute_a2a_delegation_impl(
     endpoint: str,
+    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
     auth: AuthScheme | None,
     timeout: int,
     task_description: str,
@@ -524,7 +562,6 @@ async def _aexecute_a2a_delegation_impl(
         extensions=extensions,
     )

-    transport_protocol = TransportProtocol("JSONRPC")
     new_messages: list[Message] = [*conversation_history, message]
     crewai_event_bus.emit(
         None,
@@ -596,7 +633,7 @@ async def _aexecute_a2a_delegation_impl(
 @asynccontextmanager
 async def _create_a2a_client(
     agent_card: AgentCard,
-    transport_protocol: TransportProtocol,
+    transport_protocol: Literal["JSONRPC", "GRPC", "HTTP+JSON"],
     timeout: int,
     headers: MutableMapping[str, str],
     streaming: bool,
@@ -640,7 +677,7 @@ async def _create_a2a_client(

     config = ClientConfig(
         httpx_client=httpx_client,
-        supported_transports=[str(transport_protocol.value)],
+        supported_transports=[transport_protocol],
         streaming=streaming and not use_polling,
         polling=use_polling,
         accepted_output_modes=["application/json"],
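The net effect of the last two hunks is that the literal transport string is handed straight to the a2a ClientConfig instead of round-tripping through the TransportProtocol enum. A sketch of the resulting configuration, using only the fields visible in the diff; other ClientConfig options and their defaults are assumptions.

import httpx
from a2a.client import ClientConfig

def build_client_config(transport_protocol: str) -> ClientConfig:
    # transport_protocol is one of "JSONRPC", "GRPC", "HTTP+JSON"
    return ClientConfig(
        httpx_client=httpx.AsyncClient(timeout=60),
        supported_transports=[transport_protocol],
        streaming=True,
        polling=False,
        accepted_output_modes=["application/json"],
    )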
@@ -771,6 +771,7 @@ def _delegate_to_a2a(
         response_model=agent_config.response_model,
         turn_number=turn_num + 1,
         updates=agent_config.updates,
+        transport_protocol=agent_config.transport_protocol,
     )

     conversation_history = a2a_result.get("history", [])
@@ -1085,6 +1086,7 @@ async def _adelegate_to_a2a(
         agent_branch=agent_branch,
         response_model=agent_config.response_model,
         turn_number=turn_num + 1,
+        transport_protocol=agent_config.transport_protocol,
         updates=agent_config.updates,
     )
@@ -209,10 +209,9 @@ class EventListener(BaseEventListener):
         @crewai_event_bus.on(TaskCompletedEvent)
         def on_task_completed(source: Any, event: TaskCompletedEvent) -> None:
             # Handle telemetry
-            span = self.execution_spans.get(source)
+            span = self.execution_spans.pop(source, None)
             if span:
                 self._telemetry.task_ended(span, source, source.agent.crew)
-                self.execution_spans[source] = None

             # Pass task name if it exists
             task_name = get_task_name(source)
@@ -222,11 +221,10 @@ class EventListener(BaseEventListener):

         @crewai_event_bus.on(TaskFailedEvent)
         def on_task_failed(source: Any, event: TaskFailedEvent) -> None:
-            span = self.execution_spans.get(source)
+            span = self.execution_spans.pop(source, None)
             if span:
                 if source.agent and source.agent.crew:
                     self._telemetry.task_ended(span, source, source.agent.crew)
-                self.execution_spans[source] = None

             # Pass task name if it exists
             task_name = get_task_name(source)
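The switch from get() plus a None assignment to pop() removes the span entry entirely, so execution_spans no longer accumulates stale None entries for finished tasks. A minimal stand-alone illustration, with a plain dict standing in for the span registry:

execution_spans = {"task-1": "span-1"}

# Old pattern: the key survives with a None value and the dict keeps growing.
span = execution_spans.get("task-1")
execution_spans["task-1"] = None

# New pattern: fetch and remove in one step.
execution_spans = {"task-1": "span-1"}
span = execution_spans.pop("task-1", None)
assert "task-1" not in execution_spans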
@@ -1,4 +1,5 @@
 import inspect
+from typing import Any

 from pydantic import BaseModel, Field, InstanceOf, model_validator
 from typing_extensions import Self
@@ -14,14 +15,14 @@ class FlowTrackable(BaseModel):
     inspecting the call stack.
     """

-    parent_flow: InstanceOf[Flow] | None = Field(
+    parent_flow: InstanceOf[Flow[Any]] | None = Field(
         default=None,
         description="The parent flow of the instance, if it was created inside a flow.",
     )

     @model_validator(mode="after")
     def _set_parent_flow(self) -> Self:
-        max_depth = 5
+        max_depth = 8
         frame = inspect.currentframe()

         try:
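For context, the validator walks caller frames looking for a Flow instance among their local variables; raising max_depth from 5 to 8 lets the CrewBase-decorated pattern, which introduces extra wrapper frames, still reach the enclosing flow. A simplified stand-alone sketch of that frame walk (not the actual implementation):

import inspect

def find_enclosing_instance(target_type: type, max_depth: int = 8) -> object | None:
    frame = inspect.currentframe()
    try:
        depth = 0
        while frame is not None and depth < max_depth:
            for value in frame.f_locals.values():
                if isinstance(value, target_type):
                    return value
            frame = frame.f_back
            depth += 1
        return None
    finally:
        del frame  # avoid reference cycles from holding frame objects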
@@ -443,7 +443,7 @@ class AzureCompletion(BaseLLM):
             params["presence_penalty"] = self.presence_penalty
         if self.max_tokens is not None:
             params["max_tokens"] = self.max_tokens
-        if self.stop:
+        if self.stop and self.supports_stop_words():
             params["stop"] = self.stop

         # Handle tools/functions for Azure OpenAI models
@@ -931,8 +931,28 @@ class AzureCompletion(BaseLLM):
         return self.is_openai_model

     def supports_stop_words(self) -> bool:
-        """Check if the model supports stop words."""
-        return True  # Most Azure models support stop sequences
+        """Check if the model supports stop words.
+
+        Models using the Responses API (GPT-5 family, o-series reasoning models,
+        computer-use-preview) do not support stop sequences.
+        See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
+        """
+        model_lower = self.model.lower() if self.model else ""
+
+        if "gpt-5" in model_lower:
+            return False
+
+        o_series_models = ["o1", "o3", "o4", "o1-mini", "o3-mini", "o4-mini"]
+
+        responses_api_models = ["computer-use-preview"]
+
+        unsupported_stop_models = o_series_models + responses_api_models
+
+        for unsupported in unsupported_stop_models:
+            if unsupported in model_lower:
+                return False
+
+        return True

     def get_context_window_size(self) -> int:
         """Get the context window size for the model."""
@@ -515,6 +515,94 @@ def test_azure_supports_stop_words():
     assert llm.supports_stop_words() == True


+def test_azure_gpt5_models_do_not_support_stop_words():
+    """
+    Test that GPT-5 family models do not support stop words.
+    GPT-5 models use the Responses API which doesn't support stop sequences.
+    See: https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure
+    """
+    # GPT-5 base models
+    gpt5_models = [
+        "azure/gpt-5",
+        "azure/gpt-5-mini",
+        "azure/gpt-5-nano",
+        "azure/gpt-5-chat",
+        # GPT-5.1 series
+        "azure/gpt-5.1",
+        "azure/gpt-5.1-chat",
+        "azure/gpt-5.1-codex",
+        "azure/gpt-5.1-codex-mini",
+        # GPT-5.2 series
+        "azure/gpt-5.2",
+        "azure/gpt-5.2-chat",
+    ]
+
+    for model_name in gpt5_models:
+        llm = LLM(model=model_name)
+        assert llm.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
+
+
+def test_azure_o_series_models_do_not_support_stop_words():
+    """
+    Test that o-series reasoning models do not support stop words.
+    """
+    o_series_models = [
+        "azure/o1",
+        "azure/o1-mini",
+        "azure/o3",
+        "azure/o3-mini",
+        "azure/o4",
+        "azure/o4-mini",
+    ]
+
+    for model_name in o_series_models:
+        llm = LLM(model=model_name)
+        assert llm.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
+
+
+def test_azure_responses_api_models_do_not_support_stop_words():
+    """
+    Test that models using the Responses API do not support stop words.
+    """
+    responses_api_models = [
+        "azure/computer-use-preview",
+    ]
+
+    for model_name in responses_api_models:
+        llm = LLM(model=model_name)
+        assert llm.supports_stop_words() == False, f"Expected {model_name} to NOT support stop words"
+
+
+def test_azure_stop_words_not_included_for_unsupported_models():
+    """
+    Test that stop words are not included in completion params for models that don't support them.
+    """
+    with patch.dict(os.environ, {
+        "AZURE_API_KEY": "test-key",
+        "AZURE_ENDPOINT": "https://models.inference.ai.azure.com"
+    }):
+        # Test GPT-5 model - stop should NOT be included even if set
+        llm_gpt5 = LLM(
+            model="azure/gpt-5-nano",
+            stop=["STOP", "END"]
+        )
+        params = llm_gpt5._prepare_completion_params(
+            messages=[{"role": "user", "content": "test"}]
+        )
+        assert "stop" not in params, "stop should not be included for GPT-5 models"
+
+        # Test regular model - stop SHOULD be included
+        llm_gpt4 = LLM(
+            model="azure/gpt-4",
+            stop=["STOP", "END"]
+        )
+        params = llm_gpt4._prepare_completion_params(
+            messages=[{"role": "user", "content": "test"}]
+        )
+        assert "stop" in params, "stop should be included for GPT-4 models"
+        assert params["stop"] == ["STOP", "END"]
+
+
 def test_azure_context_window_size():
     """
     Test that Azure models return correct context window sizes
@@ -4500,6 +4500,71 @@ def test_crew_copy_with_memory():
         pytest.fail(f"Copying crew raised an unexpected exception: {e}")


+def test_sets_parent_flow_when_using_crewbase_pattern_inside_flow():
+    @CrewBase
+    class TestCrew:
+        agents_config = None
+        tasks_config = None
+
+        agents: list[BaseAgent]
+        tasks: list[Task]
+
+        @agent
+        def researcher(self) -> Agent:
+            return Agent(
+                role="Researcher",
+                goal="Research things",
+                backstory="Expert researcher",
+            )
+
+        @agent
+        def writer_agent(self) -> Agent:
+            return Agent(
+                role="Writer",
+                goal="Write things",
+                backstory="Expert writer",
+            )
+
+        @task
+        def research_task(self) -> Task:
+            return Task(
+                description="Test task for researcher",
+                expected_output="output",
+                agent=self.researcher(),
+            )
+
+        @task
+        def write_task(self) -> Task:
+            return Task(
+                description="Test task for writer",
+                expected_output="output",
+                agent=self.writer_agent(),
+            )
+
+        @crew
+        def crew(self) -> Crew:
+            return Crew(
+                agents=self.agents,
+                tasks=self.tasks,
+                process=Process.sequential,
+            )
+
+    captured_crew = None
+
+    class MyFlow(Flow):
+        @start()
+        def start_method(self):
+            nonlocal captured_crew
+            captured_crew = TestCrew().crew()
+            return captured_crew
+
+    flow = MyFlow()
+    flow.kickoff()
+
+    assert captured_crew is not None
+    assert captured_crew.parent_flow is flow
+
+
 def test_sets_parent_flow_when_outside_flow(researcher, writer):
     crew = Crew(
         agents=[researcher, writer],