mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-20 13:28:13 +00:00

Compare commits: 5 commits, gl/feat/ev ... lorenze/fi

| Author | SHA1 | Date |
|---|---|---|
| | 806863eae7 | |
| | e83b7554bf | |
| | 7834b07ce4 | |
| | a9bb03ffa8 | |
| | 5beaea189b | |

conftest.py (15 changed lines)
@@ -31,21 +31,6 @@ def cleanup_event_handlers() -> Generator[None, Any, None]:
    pass


@pytest.fixture(autouse=True, scope="function")
def reset_event_state() -> None:
    """Reset event system state before each test for isolation."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import (
        EventContextConfig,
        _event_context_config,
        _event_id_stack,
    )

    reset_emission_counter()
    _event_id_stack.set(())
    _event_context_config.set(EventContextConfig())


@pytest.fixture(autouse=True, scope="function")
def setup_test_environment() -> Generator[None, Any, None]:
    """Setup test environment for crewAI workspace."""

@@ -375,10 +375,13 @@ In this section, you'll find detailed examples that help you select, configure,
GOOGLE_API_KEY=<your-api-key>
GEMINI_API_KEY=<your-api-key>

# Optional - for Vertex AI
# For Vertex AI Express mode (API key authentication)
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>

# For Vertex AI with service account
GOOGLE_CLOUD_PROJECT=<your-project-id>
GOOGLE_CLOUD_LOCATION=<location> # Defaults to us-central1
GOOGLE_GENAI_USE_VERTEXAI=true # Set to use Vertex AI
```

**Basic Usage:**
@@ -412,7 +415,35 @@ In this section, you'll find detailed examples that help you select, configure,
)
```

**Vertex AI Configuration:**
**Vertex AI Express Mode (API Key Authentication):**

Vertex AI Express mode allows you to use Vertex AI with simple API key authentication instead of service account credentials. This is the quickest way to get started with Vertex AI.

To enable Express mode, set both environment variables in your `.env` file:
```toml .env
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>
```

Then use the LLM as usual:
```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7
)
```

<Info>
To get an Express mode API key:
- New Google Cloud users: Get an [express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
- Existing Google Cloud users: Get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)

For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
</Info>

**Vertex AI Configuration (Service Account):**
```python Code
from crewai import LLM

@@ -424,10 +455,10 @@ In this section, you'll find detailed examples that help you select, configure,
```

**Supported Environment Variables:**
- `GOOGLE_API_KEY` or `GEMINI_API_KEY`: Your Google API key (required for Gemini API)
- `GOOGLE_CLOUD_PROJECT`: Google Cloud project ID (for Vertex AI)
- `GOOGLE_API_KEY` or `GEMINI_API_KEY`: Your Google API key (required for Gemini API and Vertex AI Express mode)
- `GOOGLE_GENAI_USE_VERTEXAI`: Set to `true` to use Vertex AI (required for Express mode)
- `GOOGLE_CLOUD_PROJECT`: Google Cloud project ID (for Vertex AI with service account)
- `GOOGLE_CLOUD_LOCATION`: GCP location (defaults to `us-central1`)
- `GOOGLE_GENAI_USE_VERTEXAI`: Set to `true` to use Vertex AI
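
If you prefer to configure these variables from code rather than a `.env` file, a minimal sketch (assuming Express mode and placeholder values) looks like this:

```python Code
import os

from crewai import LLM

# Assumed placeholders -- substitute your own key.
# Setting both variables before the LLM is created enables Vertex AI Express mode.
os.environ["GOOGLE_GENAI_USE_VERTEXAI"] = "true"
os.environ["GOOGLE_API_KEY"] = "<your-api-key>"

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7,
)
```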

**Features:**
- Native function calling support for Gemini 1.5+ and 2.x models

@@ -107,7 +107,7 @@ There are several places in the CrewAI code where you can specify the model to use

## Provider Configuration Examples

CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model capabilities.
CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model capabilities.
In this section, you'll find detailed examples that help you select, configure, and optimize the LLM that best fits your project's needs.

<AccordionGroup>
@@ -153,8 +153,8 @@ CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model
</Accordion>

<Accordion title="Meta-Llama">
Meta's Llama API provides access to Meta's family of large language models.
The API is available at [Meta Llama API](https://llama.developer.meta.com?utm_source=partner-crewai&utm_medium=website).
Meta's Llama API provides access to Meta's family of large language models.
The API is available at [Meta Llama API](https://llama.developer.meta.com?utm_source=partner-crewai&utm_medium=website).
Set the following environment variables in your `.env` file:

```toml Code
@@ -207,11 +207,20 @@ CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model
Set your API key in your `.env` file. If you need a key, or want to find an existing key, check [AI Studio](https://aistudio.google.com/apikey).

```toml .env
# https://ai.google.dev/gemini-api/docs/api-key
# For the Gemini API (one of the following)
GOOGLE_API_KEY=<your-api-key>
GEMINI_API_KEY=<your-api-key>

# For Vertex AI Express mode (API key authentication)
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>

# For Vertex AI with a service account
GOOGLE_CLOUD_PROJECT=<your-project-id>
GOOGLE_CLOUD_LOCATION=<location> # Defaults to us-central1
```

Example usage in your CrewAI project:
**Basic Usage:**
```python Code
from crewai import LLM

@@ -221,6 +230,34 @@ CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model
)
```

**Vertex AI Express Mode (API Key Authentication):**

Vertex AI Express mode lets you use Vertex AI with simple API key authentication instead of service account credentials. It is the quickest way to get started with Vertex AI.

To enable Express mode, set both environment variables in your `.env` file:
```toml .env
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>
```

Then use the LLM as usual:
```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7
)
```

<Info>
To get an Express mode API key:
- New Google Cloud users: Get an [Express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
- Existing Google Cloud users: Get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)

For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
</Info>

### Gemini Models

Google offers a range of powerful models optimized for different use cases.
@@ -476,7 +513,7 @@ CrewAI supports a wide range of LLM providers, each offering unique features, authentication methods, and model

<Accordion title="Local NVIDIA NIM Deployed using WSL2">

NVIDIA NIM lets you run powerful LLMs locally on Windows machines using WSL2 (Windows Subsystem for Linux).
NVIDIA NIM lets you run powerful LLMs locally on Windows machines using WSL2 (Windows Subsystem for Linux).
This approach leverages your Nvidia GPU for private, secure, and cost-effective AI inference without relying on cloud services.
It is ideal for development, testing, or production environments that require data privacy or offline capability.

@@ -954,4 +991,4 @@ Learn how to get the most out of your LLM configuration:
llm = LLM(model="openai/gpt-4o") # 128K tokens
```
</Tab>
</Tabs>
</Tabs>

@@ -79,7 +79,7 @@ There are different places in the CrewAI code where you can specify the model

# Advanced configuration with detailed parameters
llm = LLM(
    model="openai/gpt-4",
    model="openai/gpt-4",
    temperature=0.8,
    max_tokens=150,
    top_p=0.9,
@@ -207,11 +207,20 @@ In this section, you'll find detailed examples that help you select, configure,
Set your API key in your `.env` file. If you need a key, or want to find an existing one, check [AI Studio](https://aistudio.google.com/apikey).

```toml .env
# https://ai.google.dev/gemini-api/docs/api-key
# For the Gemini API (one of the following)
GOOGLE_API_KEY=<your-api-key>
GEMINI_API_KEY=<your-api-key>

# For Vertex AI Express mode (API key authentication)
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>

# For Vertex AI with a service account
GOOGLE_CLOUD_PROJECT=<your-project-id>
GOOGLE_CLOUD_LOCATION=<location> # Defaults to us-central1
```

Example usage in your CrewAI project:
**Basic Usage:**
```python Code
from crewai import LLM

@@ -221,6 +230,34 @@ In this section, you'll find detailed examples that help you select, configure,
)
```

**Vertex AI Express Mode (API Key Authentication):**

Vertex AI Express mode lets you use Vertex AI with simple API key authentication instead of service account credentials. It is the quickest way to get started with Vertex AI.

To enable Express mode, set both environment variables in your `.env` file:
```toml .env
GOOGLE_GENAI_USE_VERTEXAI=true
GOOGLE_API_KEY=<your-api-key>
```

Then use the LLM as usual:
```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-2.0-flash",
    temperature=0.7
)
```

<Info>
To get an Express mode API key:
- New Google Cloud users: Get an [Express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
- Existing Google Cloud users: Get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)

For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
</Info>

### Gemini Models

Google offers a variety of powerful models optimized for different use cases.
@@ -823,7 +860,7 @@ Learn how to get the most out of your LLM configuration:

Remember to monitor your token usage regularly and adjust your settings to optimize cost and performance.
</Info>
</Accordion>


<Accordion title="Drop Additional Parameters">
CrewAI uses LiteLLM internally for LLM calls, which lets you drop additional parameters that are not needed for your use case. This can simplify your code and reduce the complexity of your LLM configuration.
For example, if you don't need to send the <code>stop</code> parameter, you can simply omit it from the LLM call:
@@ -882,4 +919,4 @@ Learn how to get the most out of your LLM configuration:
llm = LLM(model="openai/gpt-4o") # 128K tokens
```
</Tab>
</Tabs>
</Tabs>

@@ -189,14 +189,9 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
    Returns:
        The potentially modified inputs dictionary after before callbacks.
    """
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_bus import crewai_event_bus
    from crewai.events.event_context import get_current_parent_id
    from crewai.events.types.crew_events import CrewKickoffStartedEvent

    if get_current_parent_id() is None:
        reset_emission_counter()

    for before_callback in crew.before_kickoff_callbacks:
        if inputs is None:
            inputs = {}

@@ -1,35 +1,9 @@
from collections.abc import Iterator
from datetime import datetime, timezone
import itertools
from typing import Any
import uuid

from pydantic import BaseModel, Field

from crewai.utilities.serialization import Serializable, to_serializable


_emission_counter: Iterator[int] = itertools.count(start=1)


def get_next_emission_sequence() -> int:
    """Get the next emission sequence number.

    Thread-safe due to atomic next() on itertools.count under the GIL.

    Returns:
        The next sequence number.
    """
    return next(_emission_counter)


def reset_emission_counter() -> None:
    """Reset the emission sequence counter to 1.

    Useful for test isolation.
    """
    global _emission_counter
    _emission_counter = itertools.count(start=1)
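
For illustration only (not part of this diff), a minimal usage sketch of the two helpers above, assuming the pre-change module is importable:

```python
from crewai.events.base_events import (
    get_next_emission_sequence,
    reset_emission_counter,
)

# Sequence numbers increase monotonically across emissions in a process...
assert [get_next_emission_sequence() for _ in range(3)] == [1, 2, 3]

# ...and resetting restarts numbering at 1 (used for test isolation).
reset_emission_counter()
assert get_next_emission_sequence() == 1
```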
from crewai.utilities.serialization import to_serializable


class BaseEvent(BaseModel):
@@ -48,11 +22,7 @@ class BaseEvent(BaseModel):
    agent_id: str | None = None
    agent_role: str | None = None

    event_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    parent_event_id: str | None = None
    emission_sequence: int | None = None

    def to_json(self, exclude: set[str] | None = None) -> Serializable:
    def to_json(self, exclude: set[str] | None = None):
        """
        Converts the event to a JSON-serializable dictionary.

@@ -64,13 +34,13 @@ class BaseEvent(BaseModel):
        """
        return to_serializable(self, exclude=exclude)

    def _set_task_params(self, data: dict[str, Any]) -> None:
    def _set_task_params(self, data: dict[str, Any]):
        if "from_task" in data and (task := data["from_task"]):
            self.task_id = str(task.id)
            self.task_name = task.name or task.description
            self.from_task = None

    def _set_agent_params(self, data: dict[str, Any]) -> None:
    def _set_agent_params(self, data: dict[str, Any]):
        task = data.get("from_task", None)
        agent = task.agent if task else data.get("from_agent", None)

@@ -16,19 +16,8 @@ from typing import Any, Final, ParamSpec, TypeVar

from typing_extensions import Self

from crewai.events.base_events import BaseEvent, get_next_emission_sequence
from crewai.events.base_events import BaseEvent
from crewai.events.depends import Depends
from crewai.events.event_context import (
    SCOPE_ENDING_EVENTS,
    SCOPE_STARTING_EVENTS,
    VALID_EVENT_PAIRS,
    get_current_parent_id,
    get_enclosing_parent_id,
    handle_empty_pop,
    handle_mismatch,
    pop_event_scope,
    push_event_scope,
)
from crewai.events.handler_graph import build_execution_plan
from crewai.events.types.event_bus_types import (
    AsyncHandler,
@@ -80,8 +69,6 @@ class CrewAIEventsBus:
    _execution_plan_cache: dict[type[BaseEvent], ExecutionPlan]
    _console: ConsoleFormatter
    _shutting_down: bool
    _pending_futures: set[Future[Any]]
    _futures_lock: threading.Lock

    def __new__(cls) -> Self:
        """Create or return the singleton instance.

@@ -104,8 +91,6 @@ class CrewAIEventsBus:
        """
        self._shutting_down = False
        self._rwlock = RWLock()
        self._pending_futures: set[Future[Any]] = set()
        self._futures_lock = threading.Lock()
        self._sync_handlers: dict[type[BaseEvent], SyncHandlerSet] = {}
        self._async_handlers: dict[type[BaseEvent], AsyncHandlerSet] = {}
        self._handler_dependencies: dict[
@@ -126,25 +111,6 @@ class CrewAIEventsBus:
        )
        self._loop_thread.start()

    def _track_future(self, future: Future[Any]) -> Future[Any]:
        """Track a future and set up automatic cleanup when it completes.

        Args:
            future: The future to track

        Returns:
            The same future for chaining
        """
        with self._futures_lock:
            self._pending_futures.add(future)

        def _cleanup(f: Future[Any]) -> None:
            with self._futures_lock:
                self._pending_futures.discard(f)

        future.add_done_callback(_cleanup)
        return future

    def _run_loop(self) -> None:
        """Run the background async event loop."""
        asyncio.set_event_loop(self._loop)
@@ -360,25 +326,6 @@ class CrewAIEventsBus:
            ... await asyncio.wrap_future(future) # In async test
            ... # or future.result(timeout=5.0) in sync code
        """
        event.emission_sequence = get_next_emission_sequence()
        if event.parent_event_id is None:
            event_type_name = event.type
            if event_type_name in SCOPE_ENDING_EVENTS:
                event.parent_event_id = get_enclosing_parent_id()
                popped = pop_event_scope()
                if popped is None:
                    handle_empty_pop(event_type_name)
                else:
                    _, popped_type = popped
                    expected_start = VALID_EVENT_PAIRS.get(event_type_name)
                    if expected_start and popped_type and popped_type != expected_start:
                        handle_mismatch(event_type_name, popped_type, expected_start)
            elif event_type_name in SCOPE_STARTING_EVENTS:
                event.parent_event_id = get_current_parent_id()
                push_event_scope(event.event_id, event_type_name)
            else:
                event.parent_event_id = get_current_parent_id()

        event_type = type(event)

        with self._rwlock.r_locked():
@@ -392,11 +339,9 @@ class CrewAIEventsBus:
        async_handlers = self._async_handlers.get(event_type, frozenset())

        if has_dependencies:
            return self._track_future(
                asyncio.run_coroutine_threadsafe(
                    self._emit_with_dependencies(source, event),
                    self._loop,
                )
            return asyncio.run_coroutine_threadsafe(
                self._emit_with_dependencies(source, event),
                self._loop,
            )

        if sync_handlers:
@@ -408,53 +353,16 @@ class CrewAIEventsBus:
                ctx.run, self._call_handlers, source, event, sync_handlers
            )
            if not async_handlers:
                return self._track_future(sync_future)
            return sync_future

        if async_handlers:
            return self._track_future(
                asyncio.run_coroutine_threadsafe(
                    self._acall_handlers(source, event, async_handlers),
                    self._loop,
                )
            return asyncio.run_coroutine_threadsafe(
                self._acall_handlers(source, event, async_handlers),
                self._loop,
            )

        return None

    def flush(self, timeout: float | None = None) -> bool:
        """Block until all pending event handlers complete.

        This method waits for all futures from previously emitted events to
        finish executing. Useful at the end of operations (like kickoff) to
        ensure all event handlers have completed before returning.

        Args:
            timeout: Maximum time in seconds to wait for handlers to complete.
                If None, waits indefinitely.

        Returns:
            True if all handlers completed, False if timeout occurred.
        """
        with self._futures_lock:
            futures_to_wait = list(self._pending_futures)

        if not futures_to_wait:
            return True

        from concurrent.futures import wait as wait_futures

        done, not_done = wait_futures(futures_to_wait, timeout=timeout)

        # Check for exceptions in completed futures
        errors = [
            future.exception() for future in done if future.exception() is not None
        ]
        for error in errors:
            self._console.print(
                f"[CrewAIEventsBus] Handler exception during flush: {error}"
            )

        return len(not_done) == 0
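
A minimal usage sketch for `flush()` (illustrative only; `MyEvent` is a hypothetical `BaseEvent` subclass and the timeout value is arbitrary):

```python
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus


class MyEvent(BaseEvent):  # hypothetical event used only for this sketch
    type: str = "my_event"


# Handlers for emitted events may still be running on the background loop.
crewai_event_bus.emit(None, MyEvent())

# Block until every pending handler future finishes, or give up after 5 seconds.
if not crewai_event_bus.flush(timeout=5.0):
    print("Some handlers were still running when the timeout expired")
```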

    async def aemit(self, source: Any, event: BaseEvent) -> None:
        """Asynchronously emit an event to registered async handlers.

@@ -556,9 +464,6 @@ class CrewAIEventsBus:
            wait: If True, wait for all pending tasks to complete before stopping.
                If False, cancel all pending tasks immediately.
        """
        if wait:
            self.flush()

        with self._rwlock.w_locked():
            self._shutting_down = True
            loop = getattr(self, "_loop", None)

@@ -1,260 +0,0 @@
"""Event context management for parent-child relationship tracking."""

from collections.abc import Generator
from contextlib import contextmanager
import contextvars
from dataclasses import dataclass
from enum import Enum

from crewai.events.utils.console_formatter import ConsoleFormatter


class MismatchBehavior(Enum):
    """Behavior when event pairs don't match."""

    WARN = "warn"
    RAISE = "raise"
    SILENT = "silent"


@dataclass
class EventContextConfig:
    """Configuration for event context behavior."""

    max_stack_depth: int = 100
    mismatch_behavior: MismatchBehavior = MismatchBehavior.WARN
    empty_pop_behavior: MismatchBehavior = MismatchBehavior.WARN


class StackDepthExceededError(Exception):
    """Raised when stack depth limit is exceeded."""


class EventPairingError(Exception):
    """Raised when event pairs don't match."""


class EmptyStackError(Exception):
    """Raised when popping from empty stack."""


_event_id_stack: contextvars.ContextVar[tuple[tuple[str, str], ...]] = (
    contextvars.ContextVar("_event_id_stack", default=())
)

_event_context_config: contextvars.ContextVar[EventContextConfig | None] = (
    contextvars.ContextVar("_event_context_config", default=None)
)

_default_config = EventContextConfig()

_console = ConsoleFormatter()


def get_current_parent_id() -> str | None:
    """Get the current parent event ID from the stack."""
    stack = _event_id_stack.get()
    return stack[-1][0] if stack else None


def get_enclosing_parent_id() -> str | None:
    """Get the parent of the current scope (stack[-2])."""
    stack = _event_id_stack.get()
    return stack[-2][0] if len(stack) >= 2 else None


def push_event_scope(event_id: str, event_type: str = "") -> None:
    """Push an event ID and type onto the scope stack."""
    config = _event_context_config.get() or _default_config
    stack = _event_id_stack.get()

    if 0 < config.max_stack_depth <= len(stack):
        raise StackDepthExceededError(
            f"Event stack depth limit ({config.max_stack_depth}) exceeded. "
            f"This usually indicates missing ending events."
        )

    _event_id_stack.set((*stack, (event_id, event_type)))


def pop_event_scope() -> tuple[str, str] | None:
    """Pop an event entry from the scope stack."""
    stack = _event_id_stack.get()
    if not stack:
        return None
    _event_id_stack.set(stack[:-1])
    return stack[-1]


def handle_empty_pop(event_type_name: str) -> None:
    """Handle a pop attempt on an empty stack."""
    config = _event_context_config.get() or _default_config
    msg = (
        f"Ending event '{event_type_name}' emitted with empty scope stack. "
        "Missing starting event?"
    )

    if config.empty_pop_behavior == MismatchBehavior.RAISE:
        raise EmptyStackError(msg)
    if config.empty_pop_behavior == MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")


def handle_mismatch(
    event_type_name: str,
    popped_type: str,
    expected_start: str,
) -> None:
    """Handle a mismatched event pair."""
    config = _event_context_config.get() or _default_config
    msg = (
        f"Event pairing mismatch. '{event_type_name}' closed '{popped_type}' "
        f"(expected '{expected_start}')"
    )

    if config.mismatch_behavior == MismatchBehavior.RAISE:
        raise EventPairingError(msg)
    if config.mismatch_behavior == MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")


@contextmanager
def event_scope(event_id: str, event_type: str = "") -> Generator[None, None, None]:
    """Context manager to establish a parent event scope."""
    stack = _event_id_stack.get()
    already_on_stack = any(entry[0] == event_id for entry in stack)
    if not already_on_stack:
        push_event_scope(event_id, event_type)
    try:
        yield
    finally:
        if not already_on_stack:
            pop_event_scope()
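
A minimal sketch of how the scope stack pairs parents with children (illustrative only; the IDs are arbitrary):

```python
from crewai.events.event_context import event_scope, get_current_parent_id

assert get_current_parent_id() is None  # nothing on the stack yet

with event_scope("crew-123", "crew_kickoff_started"):
    # Events emitted here would receive parent_event_id == "crew-123".
    assert get_current_parent_id() == "crew-123"

    with event_scope("task-456", "task_started"):
        # Nested scopes make the innermost event the current parent.
        assert get_current_parent_id() == "task-456"

assert get_current_parent_id() is None  # the stack unwinds on exit
```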


SCOPE_STARTING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_started",
        "method_execution_started",
        "crew_kickoff_started",
        "crew_train_started",
        "crew_test_started",
        "agent_execution_started",
        "agent_evaluation_started",
        "lite_agent_execution_started",
        "task_started",
        "llm_call_started",
        "llm_guardrail_started",
        "tool_usage_started",
        "mcp_connection_started",
        "mcp_tool_execution_started",
        "memory_retrieval_started",
        "memory_save_started",
        "memory_query_started",
        "knowledge_query_started",
        "knowledge_search_query_started",
        "a2a_delegation_started",
        "a2a_conversation_started",
        "a2a_server_task_started",
        "a2a_parallel_delegation_started",
        "agent_reasoning_started",
    }
)

SCOPE_ENDING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_finished",
        "flow_paused",
        "method_execution_finished",
        "method_execution_failed",
        "crew_kickoff_completed",
        "crew_kickoff_failed",
        "crew_train_completed",
        "crew_train_failed",
        "crew_test_completed",
        "crew_test_failed",
        "agent_execution_completed",
        "agent_execution_error",
        "agent_evaluation_completed",
        "agent_evaluation_failed",
        "lite_agent_execution_completed",
        "lite_agent_execution_error",
        "task_completed",
        "task_failed",
        "llm_call_completed",
        "llm_call_failed",
        "llm_guardrail_completed",
        "llm_guardrail_failed",
        "tool_usage_finished",
        "tool_usage_error",
        "mcp_connection_completed",
        "mcp_connection_failed",
        "mcp_tool_execution_completed",
        "mcp_tool_execution_failed",
        "memory_retrieval_completed",
        "memory_save_completed",
        "memory_save_failed",
        "memory_query_completed",
        "memory_query_failed",
        "knowledge_query_completed",
        "knowledge_query_failed",
        "knowledge_search_query_completed",
        "knowledge_search_query_failed",
        "a2a_delegation_completed",
        "a2a_conversation_completed",
        "a2a_server_task_completed",
        "a2a_server_task_canceled",
        "a2a_server_task_failed",
        "a2a_parallel_delegation_completed",
        "agent_reasoning_completed",
        "agent_reasoning_failed",
    }
)

VALID_EVENT_PAIRS: dict[str, str] = {
    "flow_finished": "flow_started",
    "flow_paused": "flow_started",
    "method_execution_finished": "method_execution_started",
    "method_execution_failed": "method_execution_started",
    "crew_kickoff_completed": "crew_kickoff_started",
    "crew_kickoff_failed": "crew_kickoff_started",
    "crew_train_completed": "crew_train_started",
    "crew_train_failed": "crew_train_started",
    "crew_test_completed": "crew_test_started",
    "crew_test_failed": "crew_test_started",
    "agent_execution_completed": "agent_execution_started",
    "agent_execution_error": "agent_execution_started",
    "agent_evaluation_completed": "agent_evaluation_started",
    "agent_evaluation_failed": "agent_evaluation_started",
    "lite_agent_execution_completed": "lite_agent_execution_started",
    "lite_agent_execution_error": "lite_agent_execution_started",
    "task_completed": "task_started",
    "task_failed": "task_started",
    "llm_call_completed": "llm_call_started",
    "llm_call_failed": "llm_call_started",
    "llm_guardrail_completed": "llm_guardrail_started",
    "llm_guardrail_failed": "llm_guardrail_started",
    "tool_usage_finished": "tool_usage_started",
    "tool_usage_error": "tool_usage_started",
    "mcp_connection_completed": "mcp_connection_started",
    "mcp_connection_failed": "mcp_connection_started",
    "mcp_tool_execution_completed": "mcp_tool_execution_started",
    "mcp_tool_execution_failed": "mcp_tool_execution_started",
    "memory_retrieval_completed": "memory_retrieval_started",
    "memory_save_completed": "memory_save_started",
    "memory_save_failed": "memory_save_started",
    "memory_query_completed": "memory_query_started",
    "memory_query_failed": "memory_query_started",
    "knowledge_query_completed": "knowledge_query_started",
    "knowledge_query_failed": "knowledge_query_started",
    "knowledge_search_query_completed": "knowledge_search_query_started",
    "knowledge_search_query_failed": "knowledge_search_query_started",
    "a2a_delegation_completed": "a2a_delegation_started",
    "a2a_conversation_completed": "a2a_conversation_started",
    "a2a_server_task_completed": "a2a_server_task_started",
    "a2a_server_task_canceled": "a2a_server_task_started",
    "a2a_server_task_failed": "a2a_server_task_started",
    "a2a_parallel_delegation_completed": "a2a_parallel_delegation_started",
    "agent_reasoning_completed": "agent_reasoning_started",
    "agent_reasoning_failed": "agent_reasoning_started",
}
@@ -267,12 +267,9 @@ class TraceBatchManager:

        sorted_events = sorted(
            self.event_buffer,
            key=lambda e: (
                e.emission_sequence
                if e.emission_sequence is not None
                else float("inf"),
                e.timestamp if hasattr(e, "timestamp") and e.timestamp else "",
            ),
            key=lambda e: e.timestamp
            if hasattr(e, "timestamp") and e.timestamp
            else "",
        )

        self.current_batch.events = sorted_events
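
To make the ordering change concrete, here is a small sketch with made-up events: the new key sorts primarily by `emission_sequence` (falling back to the timestamp), whereas the old key used the timestamp alone and could not break ties.

```python
from dataclasses import dataclass


@dataclass
class FakeEvent:  # stand-in for a buffered trace event in this sketch
    emission_sequence: int | None
    timestamp: str


buffer = [
    FakeEvent(emission_sequence=2, timestamp="2024-01-01T00:00:00"),
    FakeEvent(emission_sequence=1, timestamp="2024-01-01T00:00:00"),  # same timestamp
    FakeEvent(emission_sequence=None, timestamp="2024-01-01T00:00:01"),
]

# New key: (sequence or +inf, timestamp) keeps same-timestamp events in emission order
# and pushes events without a sequence number to the end.
ordered = sorted(
    buffer,
    key=lambda e: (
        e.emission_sequence if e.emission_sequence is not None else float("inf"),
        e.timestamp,
    ),
)
assert [e.emission_sequence for e in ordered] == [1, 2, None]
```
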
@@ -9,7 +9,6 @@ from typing_extensions import Self
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.version import get_crewai_version
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import CrewAIEventsBus
from crewai.events.listeners.tracing.first_time_trace_handler import (
    FirstTimeTraceHandler,
@@ -617,7 +616,7 @@ class TraceCollectionListener(BaseEventListener):
        if self.batch_manager.is_batch_initialized():
            self.batch_manager.finalize_batch()

    def _initialize_crew_batch(self, source: Any, event: BaseEvent) -> None:
    def _initialize_crew_batch(self, source: Any, event: Any) -> None:
        """Initialize trace batch.

        Args:
@@ -627,7 +626,7 @@ class TraceCollectionListener(BaseEventListener):
        user_context = self._get_user_context()
        execution_metadata = {
            "crew_name": getattr(event, "crew_name", "Unknown Crew"),
            "execution_start": event.timestamp,
            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
            "crewai_version": get_crewai_version(),
        }

@@ -636,7 +635,7 @@ class TraceCollectionListener(BaseEventListener):

        self._initialize_batch(user_context, execution_metadata)

    def _initialize_flow_batch(self, source: Any, event: BaseEvent) -> None:
    def _initialize_flow_batch(self, source: Any, event: Any) -> None:
        """Initialize trace batch for Flow execution.

        Args:
@@ -646,7 +645,7 @@ class TraceCollectionListener(BaseEventListener):
        user_context = self._get_user_context()
        execution_metadata = {
            "flow_name": getattr(event, "flow_name", "Unknown Flow"),
            "execution_start": event.timestamp,
            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
            "crewai_version": get_crewai_version(),
            "execution_type": "flow",
        }
@@ -715,16 +714,18 @@ class TraceCollectionListener(BaseEventListener):
        self.batch_manager.end_event_processing()

    def _create_trace_event(
        self, event_type: str, source: Any, event: BaseEvent
        self, event_type: str, source: Any, event: Any
    ) -> TraceEvent:
        """Create a trace event with ordering information."""
        trace_event = TraceEvent(
            type=event_type,
            timestamp=event.timestamp.isoformat() if event.timestamp else "",
            event_id=event.event_id,
            emission_sequence=event.emission_sequence,
            parent_event_id=event.parent_event_id,
        )
        """Create a trace event"""
        if hasattr(event, "timestamp") and event.timestamp:
            trace_event = TraceEvent(
                type=event_type,
                timestamp=event.timestamp.isoformat(),
            )
        else:
            trace_event = TraceEvent(
                type=event_type,
            )

        trace_event.event_data = self._build_event_data(event_type, event, source)

@@ -777,8 +778,10 @@ class TraceCollectionListener(BaseEventListener):
        }
        if event_type == "llm_call_started":
            event_data = safe_serialize_to_dict(event)
            event_data["task_name"] = event.task_name or getattr(
                event, "task_description", None
            event_data["task_name"] = (
                event.task_name or event.task_description
                if hasattr(event, "task_name") and event.task_name
                else None
            )
            return event_data
        if event_type == "llm_call_completed":

@@ -15,8 +15,5 @@ class TraceEvent:
    type: str = ""
    event_data: dict[str, Any] = field(default_factory=dict)

    emission_sequence: int | None = None
    parent_event_id: str | None = None

    def to_dict(self) -> dict[str, Any]:
        return asdict(self)
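
For reference, a minimal sketch of how a trace event is built and serialized (values are arbitrary; before this change the ordering fields above could also be passed):

```python
trace_event = TraceEvent(
    type="llm_call_started",
    event_data={"task_name": "research"},
)
payload = trace_event.to_dict()  # plain dict produced by dataclasses.asdict
```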
@@ -30,9 +30,7 @@ from pydantic import BaseModel, Field, ValidationError
from rich.console import Console
from rich.panel import Panel

from crewai.events.base_events import reset_emission_counter
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_context import get_current_parent_id
from crewai.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
@@ -75,7 +73,6 @@ from crewai.flow.utils import (
    is_simple_flow_condition,
)


if TYPE_CHECKING:
    from crewai.flow.async_feedback.types import PendingFeedbackContext
    from crewai.flow.human_feedback import HumanFeedbackResult
@@ -573,7 +570,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        flow_id: str,
        persistence: FlowPersistence | None = None,
        **kwargs: Any,
    ) -> Flow[Any]:
    ) -> "Flow[Any]":
        """Create a Flow instance from a pending feedback state.

        This classmethod is used to restore a flow that was paused waiting
@@ -634,7 +631,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        return instance

    @property
    def pending_feedback(self) -> PendingFeedbackContext | None:
    def pending_feedback(self) -> "PendingFeedbackContext | None":
        """Get the pending feedback context if this flow is waiting for feedback.

        Returns:
@@ -719,9 +716,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
        Raises:
            ValueError: If no pending feedback context exists
        """
        from datetime import datetime

        from crewai.flow.human_feedback import HumanFeedbackResult
        from datetime import datetime

        if self._pending_feedback_context is None:
            raise ValueError(
@@ -748,7 +744,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        collapsed_outcome = self._collapse_to_outcome(
            feedback=feedback,
            outcomes=emit,
            llm=llm,  # type: ignore[arg-type]
            llm=llm,
        )

        # Create result
@@ -796,13 +792,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
        self._method_outputs.append(collapsed_outcome)

        # Then trigger listeners for the outcome (e.g., "approved" triggers @listen("approved"))
        final_result = await self._execute_listeners(  # type: ignore[func-returns-value]
        final_result = await self._execute_listeners(
            FlowMethodName(collapsed_outcome),  # Use outcome as trigger
            result,  # Pass HumanFeedbackResult to listeners
        )
        else:
            # Normal behavior - pass the HumanFeedbackResult
            final_result = await self._execute_listeners(  # type: ignore[func-returns-value]
            final_result = await self._execute_listeners(
                FlowMethodName(context.method_name),
                result,
            )
@@ -905,11 +901,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
            model_fields = getattr(self.initial_state, "model_fields", None)
            if not model_fields or "id" not in model_fields:
                raise ValueError("Flow state model must have an 'id' field")
            instance = self.initial_state()  # type: ignore[assignment]
            instance = self.initial_state()
            # Ensure id is set - generate UUID if empty
            if not getattr(instance, "id", None):
                object.__setattr__(instance, "id", str(uuid4()))
            return instance  # type: ignore[return-value]
            return instance
        if self.initial_state is dict:
            return cast(T, {"id": str(uuid4())})

@@ -1330,9 +1326,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if filtered_inputs:
            self._initialize_state(filtered_inputs)

        if get_current_parent_id() is None:
            reset_emission_counter()

        # Emit FlowStartedEvent and log the start of the flow.
        if not self.suppress_flow_events:
            future = crewai_event_bus.emit(
@@ -2060,7 +2053,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if isinstance(llm, str):
            llm_instance = LLM(model=llm)
        elif isinstance(llm, BaseLLMClass):
            llm_instance = llm  # type: ignore[assignment]
            llm_instance = llm
        else:
            raise ValueError(f"Invalid llm type: {type(llm)}. Expected str or BaseLLM.")

@@ -2097,7 +2090,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

        try:
            parsed = json.loads(response)
            return parsed.get("outcome", outcomes[0])  # type: ignore[no-any-return]
            return parsed.get("outcome", outcomes[0])
        except json.JSONDecodeError:
            # Not valid JSON, might be raw outcome string
            response_clean = response.strip()
@@ -2106,9 +2099,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
                return outcome
            return outcomes[0]
        elif isinstance(response, FeedbackOutcome):
            return response.outcome  # type: ignore[no-any-return]
            return response.outcome
        elif hasattr(response, "outcome"):
            return response.outcome  # type: ignore[no-any-return]
            return response.outcome
        else:
            # Unexpected type, fall back to first outcome
            logger.warning(f"Unexpected response type: {type(response)}")

@@ -54,15 +54,21 @@ class GeminiCompletion(BaseLLM):
        safety_settings: dict[str, Any] | None = None,
        client_params: dict[str, Any] | None = None,
        interceptor: BaseInterceptor[Any, Any] | None = None,
        use_vertexai: bool | None = None,
        **kwargs: Any,
    ):
        """Initialize Google Gemini chat completion client.

        Args:
            model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
            api_key: Google API key (defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var)
            project: Google Cloud project ID (for Vertex AI)
            location: Google Cloud location (for Vertex AI, defaults to 'us-central1')
            api_key: Google API key for Gemini API authentication.
                Defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var.
                NOTE: Cannot be used with Vertex AI (project parameter). Use Gemini API instead.
            project: Google Cloud project ID for Vertex AI with ADC authentication.
                Requires Application Default Credentials (gcloud auth application-default login).
                NOTE: Vertex AI does NOT support API keys, only OAuth2/ADC.
                If both api_key and project are set, api_key takes precedence.
            location: Google Cloud location (for Vertex AI with ADC, defaults to 'us-central1')
            temperature: Sampling temperature (0-2)
            top_p: Nucleus sampling parameter
            top_k: Top-k sampling parameter
@@ -73,6 +79,12 @@ class GeminiCompletion(BaseLLM):
            client_params: Additional parameters to pass to the Google Gen AI Client constructor.
                Supports parameters like http_options, credentials, debug_config, etc.
            interceptor: HTTP interceptor (not yet supported for Gemini).
            use_vertexai: Whether to use Vertex AI instead of Gemini API.
                - True: Use Vertex AI (with ADC or Express mode with API key)
                - False: Use Gemini API (explicitly override env var)
                - None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var
                When using Vertex AI with API key (Express mode), http_options with
                api_version="v1" is automatically configured.
            **kwargs: Additional parameters
        """
        if interceptor is not None:
@@ -95,7 +107,8 @@ class GeminiCompletion(BaseLLM):
        self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
        self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"

        use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"
        if use_vertexai is None:
            use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"

        self.client = self._initialize_client(use_vertexai)

@@ -146,13 +159,34 @@ class GeminiCompletion(BaseLLM):

        Returns:
            Initialized Google Gen AI Client

        Note:
            Google Gen AI SDK has two distinct endpoints with different auth requirements:
            - Gemini API (generativelanguage.googleapis.com): Supports API key authentication
            - Vertex AI (aiplatform.googleapis.com): Only supports OAuth2/ADC, NO API keys

            When vertexai=True is set, it routes to aiplatform.googleapis.com which rejects
            API keys. Use Gemini API endpoint for API key authentication instead.
        """
        client_params = {}

        if self.client_params:
            client_params.update(self.client_params)

        if use_vertexai or self.project:
        # Determine authentication mode based on available credentials
        has_api_key = bool(self.api_key)
        has_project = bool(self.project)

        if has_api_key and has_project:
            logging.warning(
                "Both API key and project provided. Using API key authentication. "
                "Project/location parameters are ignored when using API keys. "
                "To use Vertex AI with ADC, remove the api_key parameter."
            )
            has_project = False

        # Vertex AI with ADC (project without API key)
        if (use_vertexai or has_project) and not has_api_key:
            client_params.update(
                {
                    "vertexai": True,
@@ -161,12 +195,20 @@ class GeminiCompletion(BaseLLM):
                }
            )

            client_params.pop("api_key", None)

        elif self.api_key:
        # API key authentication (works with both Gemini API and Vertex AI Express)
        elif has_api_key:
            client_params["api_key"] = self.api_key

            client_params.pop("vertexai", None)
            # Vertex AI Express mode: API key + vertexai=True + http_options with api_version="v1"
            # See: https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey
            if use_vertexai:
                client_params["vertexai"] = True
                client_params["http_options"] = types.HttpOptions(api_version="v1")
            else:
                # This ensures we use the Gemini API (generativelanguage.googleapis.com)
                client_params["vertexai"] = False

            # Clean up project/location (not allowed with API key)
            client_params.pop("project", None)
            client_params.pop("location", None)

@@ -175,10 +217,13 @@ class GeminiCompletion(BaseLLM):
            return genai.Client(**client_params)
        except Exception as e:
            raise ValueError(
                "Either GOOGLE_API_KEY/GEMINI_API_KEY (for Gemini API) or "
                "GOOGLE_CLOUD_PROJECT (for Vertex AI) must be set"
                "Authentication required. Provide one of:\n"
                " 1. API key via GOOGLE_API_KEY or GEMINI_API_KEY environment variable\n"
                "    (use_vertexai=True is optional for Vertex AI with API key)\n"
                " 2. For Vertex AI with ADC: Set GOOGLE_CLOUD_PROJECT and run:\n"
                "    gcloud auth application-default login\n"
                " 3. Pass api_key parameter directly to LLM constructor\n"
            ) from e

        return genai.Client(**client_params)

    def _get_client_params(self) -> dict[str, Any]:
@@ -202,6 +247,8 @@ class GeminiCompletion(BaseLLM):
                    "location": self.location,
                }
            )
        if self.api_key:
            params["api_key"] = self.api_key
        elif self.api_key:
            params["api_key"] = self.api_key
@@ -241,9 +241,6 @@ class ToolUsage:
        if self.task:
            self.task.increment_tools_errors()

        started_at = time.time()
        started_event_emitted = False

        if self.agent:
            event_data = {
                "agent_key": self.agent.key,
@@ -261,162 +258,151 @@ class ToolUsage:
|
||||
event_data["task_name"] = self.task.name or self.task.description
|
||||
event_data["task_id"] = str(self.task.id)
|
||||
crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
|
||||
started_event_emitted = True
|
||||
|
||||
started_at = time.time()
|
||||
from_cache = False
|
||||
result = None # type: ignore
|
||||
should_retry = False
|
||||
available_tool = None
|
||||
|
||||
try:
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
input_str = ""
|
||||
if calling.arguments:
|
||||
if isinstance(calling.arguments, dict):
|
||||
input_str = json.dumps(calling.arguments)
|
||||
else:
|
||||
input_str = str(calling.arguments)
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
input_str = ""
|
||||
if calling.arguments:
|
||||
if isinstance(calling.arguments, dict):
|
||||
input_str = json.dumps(calling.arguments)
|
||||
else:
|
||||
input_str = str(calling.arguments)
|
||||
|
||||
result = self.tools_handler.cache.read(
|
||||
tool=calling.tool_name, input=input_str
|
||||
) # type: ignore
|
||||
from_cache = result is not None
|
||||
result = self.tools_handler.cache.read(
|
||||
tool=calling.tool_name, input=input_str
|
||||
) # type: ignore
|
||||
from_cache = result is not None
|
||||
|
||||
available_tool = next(
|
||||
(
|
||||
available_tool
|
||||
for available_tool in self.tools
|
||||
if available_tool.name == tool.name
|
||||
),
|
||||
None,
|
||||
)
|
||||
available_tool = next(
|
||||
(
|
||||
available_tool
|
||||
for available_tool in self.tools
|
||||
if available_tool.name == tool.name
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
usage_limit_error = self._check_usage_limit(available_tool, tool.name)
|
||||
if usage_limit_error:
|
||||
usage_limit_error = self._check_usage_limit(available_tool, tool.name)
|
||||
if usage_limit_error:
|
||||
try:
|
||||
result = usage_limit_error
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
result = self._format_result(result=result)
|
||||
# Don't return early - fall through to finally block
|
||||
elif result is None:
|
||||
try:
|
||||
if calling.tool_name in [
|
||||
"Delegate work to coworker",
|
||||
"Ask question to coworker",
|
||||
]:
|
||||
coworker = (
|
||||
calling.arguments.get("coworker")
|
||||
if calling.arguments
|
||||
else None
|
||||
)
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
return self._format_result(result=result)
|
||||
except Exception:
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
"properties"
|
||||
].keys()
|
||||
arguments = {
|
||||
k: v
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
if (
|
||||
hasattr(available_tool, "cache_function")
|
||||
and available_tool.cache_function
|
||||
):
|
||||
should_cache = available_tool.cache_function(
|
||||
calling.arguments, result
|
||||
)
|
||||
|
||||
self.tools_handler.on_tool_use(
|
||||
calling=calling, output=result, should_cache=should_cache
|
||||
)
|
||||
|
||||
self._telemetry.tool_usage(
|
||||
llm=self.function_calling_llm,
|
||||
tool_name=tool.name,
|
||||
attempts=self._run_attempts,
|
||||
if result is None:
|
||||
try:
|
||||
if calling.tool_name in [
|
||||
"Delegate work to coworker",
|
||||
"Ask question to coworker",
|
||||
]:
|
||||
coworker = (
|
||||
calling.arguments.get("coworker") if calling.arguments else None
|
||||
)
|
||||
result = self._format_result(result=result)
|
||||
data = {
|
||||
"result": result,
|
||||
"tool_name": tool.name,
|
||||
"tool_args": calling.arguments,
|
||||
}
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
|
||||
if (
|
||||
hasattr(available_tool, "result_as_answer")
|
||||
and available_tool.result_as_answer
|
||||
):
|
||||
result_as_answer = available_tool.result_as_answer
|
||||
data["result_as_answer"] = result_as_answer
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
"properties"
|
||||
].keys()
|
||||
arguments = {
|
||||
k: v
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
except Exception as e:
|
||||
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
error_message = self._i18n.errors("tool_usage_exception").format(
|
||||
error=e, tool=tool.name, tool_inputs=tool.description
|
||||
)
|
||||
error = ToolUsageError(
|
||||
f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
|
||||
).message
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"\n\n{error_message}\n", color="red"
|
||||
)
|
||||
return error
|
||||
|
||||
if self.agent and hasattr(self.agent, "tools_results"):
|
||||
self.agent.tools_results.append(data)
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
return await self.ause(calling=calling, tool_string=tool_string)
|
||||
|
||||
if available_tool and hasattr(
|
||||
available_tool, "current_usage_count"
|
||||
):
|
||||
available_tool.current_usage_count += 1
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
color="blue",
|
||||
)
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
if (
|
||||
hasattr(available_tool, "cache_function")
|
||||
and available_tool.cache_function
|
||||
):
|
||||
should_cache = available_tool.cache_function(
|
||||
calling.arguments, result
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
error_message = self._i18n.errors(
|
||||
"tool_usage_exception"
|
||||
).format(error=e, tool=tool.name, tool_inputs=tool.description)
|
||||
result = ToolUsageError(
|
||||
f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
|
||||
).message
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"\n\n{error_message}\n", color="red"
|
||||
)
|
||||
else:
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
should_retry = True
|
||||
else:
|
||||
result = self._format_result(result=result)
|
||||
|
||||
finally:
|
||||
if started_event_emitted:
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
from_cache=from_cache,
|
||||
started_at=started_at,
|
||||
result=result,
|
||||
self.tools_handler.on_tool_use(
|
||||
calling=calling, output=result, should_cache=should_cache
|
||||
)
|
||||
|
||||
# Handle retry after finally block ensures finished event was emitted
|
||||
if should_retry:
|
||||
return await self.ause(calling=calling, tool_string=tool_string)
|
||||
self._telemetry.tool_usage(
|
||||
llm=self.function_calling_llm,
|
||||
tool_name=tool.name,
|
||||
attempts=self._run_attempts,
|
||||
)
|
||||
result = self._format_result(result=result)
|
||||
data = {
|
||||
"result": result,
|
||||
"tool_name": tool.name,
|
||||
"tool_args": calling.arguments,
|
||||
}
|
||||
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
from_cache=from_cache,
|
||||
started_at=started_at,
|
||||
result=result,
|
||||
)
|
||||
|
||||
if (
|
||||
hasattr(available_tool, "result_as_answer")
|
||||
and available_tool.result_as_answer # type: ignore
|
||||
):
|
||||
result_as_answer = available_tool.result_as_answer # type: ignore
|
||||
data["result_as_answer"] = result_as_answer # type: ignore
|
||||
|
||||
if self.agent and hasattr(self.agent, "tools_results"):
|
||||
self.agent.tools_results.append(data)
|
||||
|
||||
if available_tool and hasattr(available_tool, "current_usage_count"):
|
||||
available_tool.current_usage_count += 1
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
color="blue",
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
@@ -426,7 +412,6 @@ class ToolUsage:
|
||||
tool: CrewStructuredTool,
|
||||
calling: ToolCalling | InstructorToolCalling,
|
||||
) -> str:
|
||||
# Repeated usage check happens before event emission - safe to return early
|
||||
if self._check_tool_repeated_usage(calling=calling):
|
||||
try:
|
||||
result = self._i18n.errors("task_repeated_usage").format(
|
||||
@@ -443,9 +428,6 @@ class ToolUsage:
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
|
||||
started_at = time.time()
|
||||
started_event_emitted = False
|
||||
|
||||
if self.agent:
|
||||
event_data = {
|
||||
"agent_key": self.agent.key,
|
||||
@@ -464,162 +446,155 @@ class ToolUsage:
|
||||
event_data["task_name"] = self.task.name or self.task.description
|
||||
event_data["task_id"] = str(self.task.id)
|
||||
crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
|
||||
started_event_emitted = True
|
||||
|
||||
started_at = time.time()
|
||||
from_cache = False
|
||||
result = None # type: ignore
|
||||
should_retry = False
|
||||
available_tool = None
|
||||
|
||||
try:
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
input_str = ""
|
||||
if calling.arguments:
|
||||
if isinstance(calling.arguments, dict):
|
||||
input_str = json.dumps(calling.arguments)
|
||||
else:
|
||||
input_str = str(calling.arguments)
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
input_str = ""
|
||||
if calling.arguments:
|
||||
if isinstance(calling.arguments, dict):
|
||||
import json
|
||||
|
||||
result = self.tools_handler.cache.read(
|
||||
tool=calling.tool_name, input=input_str
|
||||
) # type: ignore
|
||||
from_cache = result is not None
|
||||
input_str = json.dumps(calling.arguments)
|
||||
else:
|
||||
input_str = str(calling.arguments)
|
||||
|
||||
available_tool = next(
|
||||
(
|
||||
available_tool
|
||||
for available_tool in self.tools
|
||||
if available_tool.name == tool.name
|
||||
),
|
||||
None,
|
||||
)
|
||||
result = self.tools_handler.cache.read(
|
||||
tool=calling.tool_name, input=input_str
|
||||
) # type: ignore
|
||||
from_cache = result is not None
|
||||
|
||||
usage_limit_error = self._check_usage_limit(available_tool, tool.name)
|
||||
if usage_limit_error:
|
||||
available_tool = next(
|
||||
(
|
||||
available_tool
|
||||
for available_tool in self.tools
|
||||
if available_tool.name == tool.name
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
usage_limit_error = self._check_usage_limit(available_tool, tool.name)
|
||||
if usage_limit_error:
|
||||
try:
|
||||
result = usage_limit_error
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
result = self._format_result(result=result)
|
||||
# Don't return early - fall through to finally block
|
||||
elif result is None:
|
||||
try:
|
||||
if calling.tool_name in [
|
||||
"Delegate work to coworker",
|
||||
"Ask question to coworker",
|
||||
]:
|
||||
coworker = (
|
||||
calling.arguments.get("coworker")
|
||||
if calling.arguments
|
||||
else None
|
||||
)
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
return self._format_result(result=result)
|
||||
except Exception:
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
"properties"
|
||||
].keys()
|
||||
arguments = {
|
||||
k: v
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = tool.invoke(input=arguments)
|
||||
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
if (
|
||||
hasattr(available_tool, "cache_function")
|
||||
and available_tool.cache_function
|
||||
):
|
||||
should_cache = available_tool.cache_function(
|
||||
calling.arguments, result
|
||||
)
|
||||
|
||||
self.tools_handler.on_tool_use(
|
||||
calling=calling, output=result, should_cache=should_cache
|
||||
)
|
||||
|
||||
self._telemetry.tool_usage(
|
||||
llm=self.function_calling_llm,
|
||||
tool_name=tool.name,
|
||||
attempts=self._run_attempts,
)
if result is None:
|
||||
try:
|
||||
if calling.tool_name in [
|
||||
"Delegate work to coworker",
|
||||
"Ask question to coworker",
|
||||
]:
|
||||
coworker = (
|
||||
calling.arguments.get("coworker") if calling.arguments else None
|
||||
)
|
||||
result = self._format_result(result=result)
|
||||
data = {
|
||||
"result": result,
|
||||
"tool_name": tool.name,
|
||||
"tool_args": calling.arguments,
|
||||
}
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
|
||||
if (
|
||||
hasattr(available_tool, "result_as_answer")
|
||||
and available_tool.result_as_answer
|
||||
):
|
||||
result_as_answer = available_tool.result_as_answer
|
||||
data["result_as_answer"] = result_as_answer
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
"properties"
|
||||
].keys()
|
||||
arguments = {
|
||||
k: v
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
# Add fingerprint metadata if available
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
# Add fingerprint metadata if available
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
else:
|
||||
# Add fingerprint metadata even to empty arguments
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = tool.invoke(input=arguments)
|
||||
except Exception as e:
|
||||
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
error_message = self._i18n.errors("tool_usage_exception").format(
|
||||
error=e, tool=tool.name, tool_inputs=tool.description
|
||||
)
|
||||
error = ToolUsageError(
|
||||
f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
|
||||
).message
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"\n\n{error_message}\n", color="red"
|
||||
)
|
||||
return error
|
||||
|
||||
if self.agent and hasattr(self.agent, "tools_results"):
|
||||
self.agent.tools_results.append(data)
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
return self.use(calling=calling, tool_string=tool_string)
|
||||
|
||||
if available_tool and hasattr(
|
||||
available_tool, "current_usage_count"
|
||||
):
|
||||
available_tool.current_usage_count += 1
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
color="blue",
|
||||
)
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
if (
|
||||
hasattr(available_tool, "cache_function")
|
||||
and available_tool.cache_function
|
||||
):
|
||||
should_cache = available_tool.cache_function(
|
||||
calling.arguments, result
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.on_tool_error(tool=tool, tool_calling=calling, e=e)
|
||||
self._run_attempts += 1
|
||||
if self._run_attempts > self._max_parsing_attempts:
|
||||
self._telemetry.tool_usage_error(llm=self.function_calling_llm)
|
||||
error_message = self._i18n.errors(
|
||||
"tool_usage_exception"
|
||||
).format(error=e, tool=tool.name, tool_inputs=tool.description)
|
||||
result = ToolUsageError(
|
||||
f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
|
||||
).message
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"\n\n{error_message}\n", color="red"
|
||||
)
|
||||
else:
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
should_retry = True
|
||||
else:
|
||||
result = self._format_result(result=result)
|
||||
|
||||
finally:
|
||||
if started_event_emitted:
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
from_cache=from_cache,
|
||||
started_at=started_at,
|
||||
result=result,
)
self.tools_handler.on_tool_use(
|
||||
calling=calling, output=result, should_cache=should_cache
|
||||
)
|
||||
self._telemetry.tool_usage(
|
||||
llm=self.function_calling_llm,
|
||||
tool_name=tool.name,
|
||||
attempts=self._run_attempts,
|
||||
)
|
||||
result = self._format_result(result=result)
|
||||
data = {
|
||||
"result": result,
|
||||
"tool_name": tool.name,
|
||||
"tool_args": calling.arguments,
|
||||
}
|
||||
|
||||
# Handle retry after finally block ensures finished event was emitted
|
||||
if should_retry:
|
||||
return self.use(calling=calling, tool_string=tool_string)
|
||||
self.on_tool_use_finished(
|
||||
tool=tool,
|
||||
tool_calling=calling,
|
||||
from_cache=from_cache,
|
||||
started_at=started_at,
|
||||
result=result,
|
||||
)
|
||||
|
||||
if (
|
||||
hasattr(available_tool, "result_as_answer")
|
||||
and available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
|
||||
):
|
||||
result_as_answer = available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
|
||||
data["result_as_answer"] = result_as_answer # type: ignore
|
||||
|
||||
if self.agent and hasattr(self.agent, "tools_results"):
|
||||
self.agent.tools_results.append(data)
|
||||
|
||||
if available_tool and hasattr(available_tool, "current_usage_count"):
|
||||
available_tool.current_usage_count += 1
|
||||
if (
|
||||
hasattr(available_tool, "max_usage_count")
|
||||
and available_tool.max_usage_count is not None
|
||||
):
|
||||
self._printer.print(
|
||||
content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
|
||||
color="blue",
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
|
||||
personal goal is: Help with tasks\nTo give my best complete final answer to
|
||||
the task respond using the exact following format:\n\nThought: I now can give
|
||||
a great answer\nFinal Answer: Your final answer must be the great and the most
|
||||
complete as possible, it must be outcome described.\n\nI MUST use these formats,
|
||||
my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''done''
|
||||
and nothing else.\n\nThis is the expected criteria for your final answer: The
|
||||
word done.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '794'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kwid4yJOnTr1regwrMN2C7DDVhisTNtaZUmQ6GZFkX8f
|
||||
ZKexu3XALgbMx/f0HsmXBECoWpQgZIcse6fTD+t12B1QfpN+n/Ht/stjc2icvvsaPj1/FqvIsA8/
|
||||
SfIr60La3mliZc0ES0/IFFWz4mq3u94Wm3wEeluTjrTWcZpfZGmvjEo36802Xedplp/onVWSgijh
|
||||
ewIA8DJ+o1FT0y9Rwnr1WukpBGxJlOcmAOGtjhWBIajAaFisZlBaw2RG7/vODm3HJdyBsQeQaKBV
|
||||
TwQIbQwAaMKB/A/zURnUcDP+lVBbQ0tBT80QMKYyg9YLAI2xjHEqY5T7E3I8m9e2dd4+hD+oolFG
|
||||
ha7yhMGaaDSwdWJEjwnA/Tik4U1u4bztHVdsH2l8LtteT3piXs4CzU8gW0a9qBeXq3f0qpoYlQ6L
|
||||
MQuJsqN6ps47waFWdgEki9R/u3lPe0quTPs/8jMgJTmmunKeaiXfJp7bPMXb/VfbecqjYRHIPylJ
|
||||
FSvycRM1NTjo6aBEeA5MfdUo05J3Xk1X1bjqsrjCQkpqMpEck98AAAD//wMAnStaOGQDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 20 Jan 2026 07:55:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '601'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '628'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,115 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
|
||||
personal goal is: Help with tasks\nTo give my best complete final answer to
|
||||
the task respond using the exact following format:\n\nThought: I now can give
|
||||
a great answer\nFinal Answer: Your final answer must be the great and the most
|
||||
complete as possible, it must be outcome described.\n\nI MUST use these formats,
|
||||
my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''hi''
|
||||
and nothing else.\n\nThis is the expected criteria for your final answer: The
|
||||
word hi.\nyou MUST return the actual complete content as the final answer, not
|
||||
a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '790'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kwid48JOk7rxLUAxdIdil24osBUGI9M2N1kSJLnpUOTf
|
||||
Bzlp7HYdsIsB8/E9vUfyJQEQXIsShOwwyN6q9CbL/Gb7sH/YfZUFP+Pdt1u/VTdfbu8afS8WkWF2
|
||||
P0mGV9aFNL1VFNjoIywdYaComhdX19ebdbFcj0BvalKR1tqQri7ytGfN6TJbrtNslearE70zLMmL
|
||||
Er4nAAAv4zca1TU9ixKyxWulJ++xJVGemwCEMypWBHrPPqAOYjGB0uhAevR+35mh7UIJn0GbPUjU
|
||||
0PITAUIbAwBqvyf3Q39ijQq2418JHc/lHDWDx5hJD0rNANTaBIwzGYM8npDD2boyrXVm599RRcOa
|
||||
fVc5Qm90tOmDsWJEDwnA4zii4U1qYZ3pbaiC+UXjc/l6c9QT02pm6OoEBhNQzerF5eIDvaqmgKz8
|
||||
bMhCouyonqjTRnCo2cyAZJb6bzcfaR+Ts27/R34CpCQbqK6so5rl28RTm6N4uf9qO095NCw8uSeW
|
||||
VAUmFzdRU4ODOp6T8L99oL5qWLfkrOPjTTW2uiyusJCSmlwkh+QPAAAA//8DAASWsy5iAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 20 Jan 2026 07:55:26 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '369'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '391'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,115 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Responder. You give short
|
||||
answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
|
||||
''yes'' and nothing else.\n\nThis is the expected criteria for your final answer:
|
||||
The word yes.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '809'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJda9wwEHz3r1j0fA72nT9SvxVC0mvfSimUNpiNvLbVypKQ5CRHuP9e
|
||||
ZF/OTpNAXwze2RnN7O5TBMBEwypgvEfPByPjqyRxN1efH2+w+DGkn/bF9a7Nv/f7/PA1+8I2gaHv
|
||||
fhP3z6wLrgcjyQutZphbQk9BNS2Ly8sPebnbTsCgG5KB1hkfZxdpPAgl4m2yzeMki9PsRO+14ORY
|
||||
BT8jAICn6RuMqoYeWQXJ5rkykHPYEavOTQDMahkqDJ0TzqPybLOAXCtPavL+rddj1/sK9qD0A3BU
|
||||
0Il7AoQuBABU7oHsL3UtFEr4OP1VcCC31rPUjg5DKDVKuQJQKe0xDGVKcntCjmfvUnfG6jv3D5W1
|
||||
QgnX15bQaRV8Oq8Nm9BjBHA7zWh8EZsZqwfja6//0PRcWqSzHlt2s0KzE+i1R7mql/nmDb26IY9C
|
||||
utWUGUfeU7NQl5Xg2Ai9AqJV6tdu3tKekwvV/Y/8AnBOxlNTG0uN4C8TL22Wwum+13ae8mSYObL3
|
||||
glPtBdmwiYZaHOV8T8wdnKehboXqyBor5qNqTb0rCyw5pzZl0TH6CwAA//8DAPUTEd9jAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 20 Jan 2026 07:55:33 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '418'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '434'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,115 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Responder. You give short
|
||||
answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
|
||||
''hello'' and nothing else.\n\nThis is the expected criteria for your final
|
||||
answer: The word hello.\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '813'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xSTW/bMAy9+1cQOsdFnOarvm0YtvWyU4EdtsJgZdrWJouCRKcrivz3QU4au1sH
|
||||
7GLAfHxP75F8zgCUqVUJSncouvc2/7Bcxk9PX776fs+fe2zu+PYgZLvu/VZu1CIx+OEHaXlhXWnu
|
||||
vSUx7E6wDoRCSbXYbff7m83uejUCPddkE631kq+virw3zuSr5WqTL9d5sT7TOzaaoirhWwYA8Dx+
|
||||
k1FX0y9VwnLxUukpRmxJlZcmABXYporCGE0UdKIWE6jZCbnR+13HQ9tJCbfg+BE0OmjNgQChTQEA
|
||||
XXyk8N19NA4tvBv/SujIWp4rBmqGiCmWG6ydAegcC6axjFnuz8jx4t5y6wM/xD+oqjHOxK4KhJFd
|
||||
chqFvRrRYwZwP05peBVc+cC9l0r4J43PFdvipKem7czQ9RkUFrSz+m6zeEOvqknQ2Dibs9KoO6on
|
||||
6rQUHGrDMyCbpf7bzVvap+TGtf8jPwFakxeqKx+oNvp14qktUDref7VdpjwaVpHCwWiqxFBIm6ip
|
||||
wcGeLkrFpyjUV41xLQUfzOmsGl9d77a405qaQmXH7DcAAAD//wMAQklYDmUDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 20 Jan 2026 07:55:32 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '581'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '619'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,115 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Responder. You give short
|
||||
answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
|
||||
answer to the task respond using the exact following format:\n\nThought: I now
|
||||
can give a great answer\nFinal Answer: Your final answer must be the great and
|
||||
the most complete as possible, it must be outcome described.\n\nI MUST use these
|
||||
formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
|
||||
''ok'' and nothing else.\n\nThis is the expected criteria for your final answer:
|
||||
The word ok.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
authorization:
|
||||
- AUTHORIZATION-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '807'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
x-stainless-arch:
|
||||
- X-STAINLESS-ARCH-XXX
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- X-STAINLESS-OS-XXX
|
||||
x-stainless-package-version:
|
||||
- 1.83.0
|
||||
x-stainless-read-timeout:
|
||||
- X-STAINLESS-READ-TIMEOUT-XXX
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.10
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJda9wwEHz3r1j0fA72fTnxW6GU9iglD4FC22A28tpWI0tCWicN4f57
|
||||
kX05O20KfTF4Z2c0s7vPCYBQtShByA5Z9k6n77MsfCRyX9ZDsTnsrp/Wh5v++uu37urzoRWryLB3
|
||||
P0nyC+tC2t5pYmXNBEtPyBRV82J/eXm1KzabEehtTTrSWsfp9iJPe2VUus7WuzTbpvn2RO+skhRE
|
||||
Cd8TAIDn8RuNmpp+iRKy1UulpxCwJVGemwCEtzpWBIagAqNhsZpBaQ2TGb3fdHZoOy7hExj7CBIN
|
||||
tOqBAKGNAQBNeCT/w3xQBjW8G/9KsPdLOU/NEDBmMoPWCwCNsYxxJmOQ2xNyPFvXtnXe3oU/qKJR
|
||||
RoWu8oTBmmgzsHViRI8JwO04ouFVauG87R1XbO9pfC7f55OemFezQLcnkC2jXtSL3eoNvaomRqXD
|
||||
YshCouyonqnzRnColV0AySL1327e0p6SK9P+j/wMSEmOqa6cp1rJ14nnNk/xcv/Vdp7yaFgE8g9K
|
||||
UsWKfNxETQ0OejonEZ4CU181yrTknVfTTTWu2hR7LKSkJhfJMfkNAAD//wMAw/X5HWIDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- CF-RAY-XXX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 20 Jan 2026 07:55:34 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- SET-COOKIE-XXX
|
||||
Strict-Transport-Security:
|
||||
- STS-XXX
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
access-control-expose-headers:
|
||||
- ACCESS-CONTROL-XXX
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- OPENAI-ORG-XXX
|
||||
openai-processing-ms:
|
||||
- '499'
|
||||
openai-project:
|
||||
- OPENAI-PROJECT-XXX
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '517'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-requests:
|
||||
- X-RATELIMIT-LIMIT-REQUESTS-XXX
|
||||
x-ratelimit-limit-tokens:
|
||||
- X-RATELIMIT-LIMIT-TOKENS-XXX
|
||||
x-ratelimit-remaining-requests:
|
||||
- X-RATELIMIT-REMAINING-REQUESTS-XXX
|
||||
x-ratelimit-remaining-tokens:
|
||||
- X-RATELIMIT-REMAINING-TOKENS-XXX
|
||||
x-ratelimit-reset-requests:
|
||||
- X-RATELIMIT-RESET-REQUESTS-XXX
|
||||
x-ratelimit-reset-tokens:
|
||||
- X-RATELIMIT-RESET-TOKENS-XXX
|
||||
x-request-id:
|
||||
- X-REQUEST-ID-XXX
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -0,0 +1,75 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"contents": [{"parts": [{"text": "\nCurrent Task: What is the capital
|
||||
of Japan?\n\nThis is the expected criteria for your final answer: The capital
|
||||
of Japan\nyou MUST return the actual complete content as the final answer, not
|
||||
a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}], "role":
|
||||
"user"}], "systemInstruction": {"parts": [{"text": "You are Research Assistant.
|
||||
You are a helpful research assistant.\nYour personal goal is: Find information
|
||||
about the capital of Japan\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"}], "role": "user"}, "generationConfig": {"stopSequences": ["\nObservation:"]}}'
|
||||
headers:
|
||||
User-Agent:
|
||||
- X-USER-AGENT-XXX
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- ACCEPT-ENCODING-XXX
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '952'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- aiplatform.googleapis.com
|
||||
x-goog-api-client:
|
||||
- google-genai-sdk/1.59.0 gl-python/3.13.3
|
||||
x-goog-api-key:
|
||||
- X-GOOG-API-KEY-XXX
|
||||
method: POST
|
||||
uri: https://aiplatform.googleapis.com/v1/publishers/google/models/gemini-2.0-flash-exp:generateContent
|
||||
response:
|
||||
body:
|
||||
string: "{\n \"candidates\": [\n {\n \"content\": {\n \"role\":
|
||||
\"model\",\n \"parts\": [\n {\n \"text\": \"The
|
||||
capital of Japan is Tokyo.\\nFinal Answer: Tokyo\\n\"\n }\n ]\n
|
||||
\ },\n \"finishReason\": \"STOP\",\n \"avgLogprobs\": -0.017845841554495003\n
|
||||
\ }\n ],\n \"usageMetadata\": {\n \"promptTokenCount\": 163,\n \"candidatesTokenCount\":
|
||||
13,\n \"totalTokenCount\": 176,\n \"trafficType\": \"ON_DEMAND\",\n
|
||||
\ \"promptTokensDetails\": [\n {\n \"modality\": \"TEXT\",\n
|
||||
\ \"tokenCount\": 163\n }\n ],\n \"candidatesTokensDetails\":
|
||||
[\n {\n \"modality\": \"TEXT\",\n \"tokenCount\": 13\n
|
||||
\ }\n ]\n },\n \"modelVersion\": \"gemini-2.0-flash-exp\",\n \"createTime\":
|
||||
\"2026-01-15T22:27:38.066749Z\",\n \"responseId\": \"2mlpab2JBNOFidsPh5GigQs\"\n}\n"
|
||||
headers:
|
||||
Alt-Svc:
|
||||
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
Content-Type:
|
||||
- application/json; charset=UTF-8
|
||||
Date:
|
||||
- Thu, 15 Jan 2026 22:27:38 GMT
|
||||
Server:
|
||||
- scaffolding on HTTPServer2
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
Vary:
|
||||
- Origin
|
||||
- X-Origin
|
||||
- Referer
|
||||
X-Content-Type-Options:
|
||||
- X-CONTENT-TYPE-XXX
|
||||
X-Frame-Options:
|
||||
- X-FRAME-OPTIONS-XXX
|
||||
X-XSS-Protection:
|
||||
- '0'
|
||||
content-length:
|
||||
- '786'
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,99 +0,0 @@
|
||||
"""Tests for event context management."""
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.events.event_context import (
|
||||
SCOPE_ENDING_EVENTS,
|
||||
SCOPE_STARTING_EVENTS,
|
||||
VALID_EVENT_PAIRS,
|
||||
EmptyStackError,
|
||||
EventPairingError,
|
||||
MismatchBehavior,
|
||||
StackDepthExceededError,
|
||||
_event_context_config,
|
||||
EventContextConfig,
|
||||
get_current_parent_id,
|
||||
get_enclosing_parent_id,
|
||||
handle_empty_pop,
|
||||
handle_mismatch,
|
||||
pop_event_scope,
|
||||
push_event_scope,
|
||||
)
|
||||
|
||||
|
||||
class TestStackOperations:
|
||||
"""Tests for stack push/pop operations."""
|
||||
|
||||
def test_empty_stack_returns_none(self) -> None:
|
||||
assert get_current_parent_id() is None
|
||||
assert get_enclosing_parent_id() is None
|
||||
|
||||
def test_push_and_get_parent(self) -> None:
|
||||
push_event_scope("event-1", "task_started")
|
||||
assert get_current_parent_id() == "event-1"
|
||||
|
||||
def test_nested_push(self) -> None:
|
||||
push_event_scope("event-1", "crew_kickoff_started")
|
||||
push_event_scope("event-2", "task_started")
|
||||
assert get_current_parent_id() == "event-2"
|
||||
assert get_enclosing_parent_id() == "event-1"
|
||||
|
||||
def test_pop_restores_parent(self) -> None:
|
||||
push_event_scope("event-1", "crew_kickoff_started")
|
||||
push_event_scope("event-2", "task_started")
|
||||
popped = pop_event_scope()
|
||||
assert popped == ("event-2", "task_started")
|
||||
assert get_current_parent_id() == "event-1"
|
||||
|
||||
def test_pop_empty_stack_returns_none(self) -> None:
|
||||
assert pop_event_scope() is None
|
||||
|
||||
|
||||
class TestStackDepthLimit:
|
||||
"""Tests for stack depth limit."""
|
||||
|
||||
def test_depth_limit_exceeded_raises(self) -> None:
|
||||
_event_context_config.set(EventContextConfig(max_stack_depth=3))
|
||||
|
||||
push_event_scope("event-1", "type-1")
|
||||
push_event_scope("event-2", "type-2")
|
||||
push_event_scope("event-3", "type-3")
|
||||
|
||||
with pytest.raises(StackDepthExceededError):
|
||||
push_event_scope("event-4", "type-4")
|
||||
|
||||
|
||||
class TestMismatchHandling:
|
||||
"""Tests for mismatch behavior."""
|
||||
|
||||
def test_handle_mismatch_raises_when_configured(self) -> None:
|
||||
_event_context_config.set(
|
||||
EventContextConfig(mismatch_behavior=MismatchBehavior.RAISE)
|
||||
)
|
||||
|
||||
with pytest.raises(EventPairingError):
|
||||
handle_mismatch("task_completed", "llm_call_started", "task_started")
|
||||
|
||||
def test_handle_empty_pop_raises_when_configured(self) -> None:
|
||||
_event_context_config.set(
|
||||
EventContextConfig(empty_pop_behavior=MismatchBehavior.RAISE)
|
||||
)
|
||||
|
||||
with pytest.raises(EmptyStackError):
|
||||
handle_empty_pop("task_completed")
|
||||
|
||||
|
||||
class TestEventTypeSets:
|
||||
"""Tests for event type set completeness."""
|
||||
|
||||
def test_all_ending_events_have_pairs(self) -> None:
|
||||
for ending_event in SCOPE_ENDING_EVENTS:
|
||||
assert ending_event in VALID_EVENT_PAIRS
|
||||
|
||||
def test_all_pairs_reference_starting_events(self) -> None:
|
||||
for ending_event, starting_event in VALID_EVENT_PAIRS.items():
|
||||
assert starting_event in SCOPE_STARTING_EVENTS
|
||||
|
||||
def test_starting_and_ending_are_disjoint(self) -> None:
|
||||
overlap = SCOPE_STARTING_EVENTS & SCOPE_ENDING_EVENTS
|
||||
assert not overlap
|
||||
@@ -1,508 +0,0 @@
|
||||
"""Tests for event ordering and parent-child relationships."""
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.crew import Crew
|
||||
from crewai.events.base_events import BaseEvent
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.agent_events import (
|
||||
AgentExecutionCompletedEvent,
|
||||
AgentExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.crew_events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
CrewKickoffStartedEvent,
|
||||
)
|
||||
from crewai.events.types.flow_events import (
|
||||
FlowFinishedEvent,
|
||||
FlowStartedEvent,
|
||||
MethodExecutionFinishedEvent,
|
||||
MethodExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.llm_events import (
|
||||
LLMCallCompletedEvent,
|
||||
LLMCallStartedEvent,
|
||||
)
|
||||
from crewai.events.types.task_events import (
|
||||
TaskCompletedEvent,
|
||||
TaskStartedEvent,
|
||||
)
|
||||
from crewai.flow.flow import Flow, listen, start
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class EventCollector:
|
||||
"""Collects events and provides helpers to find related events."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.events: list[BaseEvent] = []
|
||||
|
||||
def add(self, event: BaseEvent) -> None:
|
||||
self.events.append(event)
|
||||
|
||||
def first(self, event_type: type[BaseEvent]) -> BaseEvent | None:
|
||||
for e in self.events:
|
||||
if isinstance(e, event_type):
|
||||
return e
|
||||
return None
|
||||
|
||||
def all_of(self, event_type: type[BaseEvent]) -> list[BaseEvent]:
|
||||
return [e for e in self.events if isinstance(e, event_type)]
|
||||
|
||||
def with_parent(self, parent_id: str) -> list[BaseEvent]:
|
||||
return [e for e in self.events if e.parent_event_id == parent_id]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def collector() -> EventCollector:
|
||||
"""Fixture that collects events during test execution."""
|
||||
c = EventCollector()
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def h1(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffCompletedEvent)
|
||||
def h2(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(TaskStartedEvent)
|
||||
def h3(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(TaskCompletedEvent)
|
||||
def h4(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(AgentExecutionStartedEvent)
|
||||
def h5(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(AgentExecutionCompletedEvent)
|
||||
def h6(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(LLMCallStartedEvent)
|
||||
def h7(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(LLMCallCompletedEvent)
|
||||
def h8(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(FlowStartedEvent)
|
||||
def h9(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(FlowFinishedEvent)
|
||||
def h10(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(MethodExecutionStartedEvent)
|
||||
def h11(source, event):
|
||||
c.add(event)
|
||||
|
||||
@crewai_event_bus.on(MethodExecutionFinishedEvent)
|
||||
def h12(source, event):
|
||||
c.add(event)
|
||||
|
||||
return c
|
||||
|
||||
|
||||
class TestCrewEventOrdering:
|
||||
"""Tests for event ordering in crew execution."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_crew_events_have_event_ids(self, collector: EventCollector) -> None:
|
||||
"""Every crew event should have a unique event_id."""
|
||||
agent = Agent(
|
||||
role="Responder",
|
||||
goal="Respond briefly",
|
||||
backstory="You give short answers.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'hello' and nothing else.",
|
||||
expected_output="The word hello.",
|
||||
agent=agent,
|
||||
)
|
||||
crew = Crew(agents=[agent], tasks=[task], verbose=False)
|
||||
crew.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
started = collector.first(CrewKickoffStartedEvent)
|
||||
completed = collector.first(CrewKickoffCompletedEvent)
|
||||
|
||||
assert started is not None
|
||||
assert started.event_id is not None
|
||||
assert len(started.event_id) > 0
|
||||
|
||||
assert completed is not None
|
||||
assert completed.event_id is not None
|
||||
assert completed.event_id != started.event_id
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_crew_completed_after_started(self, collector: EventCollector) -> None:
|
||||
"""Crew completed event should have higher sequence than started."""
|
||||
agent = Agent(
|
||||
role="Responder",
|
||||
goal="Respond briefly",
|
||||
backstory="You give short answers.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'yes' and nothing else.",
|
||||
expected_output="The word yes.",
|
||||
agent=agent,
|
||||
)
|
||||
crew = Crew(agents=[agent], tasks=[task], verbose=False)
|
||||
crew.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
started = collector.first(CrewKickoffStartedEvent)
|
||||
completed = collector.first(CrewKickoffCompletedEvent)
|
||||
|
||||
assert started is not None
|
||||
assert completed is not None
|
||||
assert started.emission_sequence is not None
|
||||
assert completed.emission_sequence is not None
|
||||
assert completed.emission_sequence > started.emission_sequence
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_task_parent_is_crew(self, collector: EventCollector) -> None:
|
||||
"""Task events should have crew event as parent."""
|
||||
agent = Agent(
|
||||
role="Responder",
|
||||
goal="Respond briefly",
|
||||
backstory="You give short answers.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'ok' and nothing else.",
|
||||
expected_output="The word ok.",
|
||||
agent=agent,
|
||||
)
|
||||
crew = Crew(agents=[agent], tasks=[task], verbose=False)
|
||||
crew.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
crew_started = collector.first(CrewKickoffStartedEvent)
|
||||
task_started = collector.first(TaskStartedEvent)
|
||||
|
||||
assert crew_started is not None
|
||||
assert task_started is not None
|
||||
assert task_started.parent_event_id == crew_started.event_id
|
||||
|
||||
|
||||
class TestAgentEventOrdering:
|
||||
"""Tests for event ordering in agent execution."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_events_have_event_ids(self, collector: EventCollector) -> None:
|
||||
"""Agent execution events should have event_ids."""
|
||||
agent = Agent(
|
||||
role="Helper",
|
||||
goal="Help with tasks",
|
||||
backstory="You help.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'done' and nothing else.",
|
||||
expected_output="The word done.",
|
||||
agent=agent,
|
||||
)
|
||||
agent.execute_task(task)
|
||||
crewai_event_bus.flush()
|
||||
|
||||
started = collector.first(AgentExecutionStartedEvent)
|
||||
completed = collector.first(AgentExecutionCompletedEvent)
|
||||
|
||||
if started:
|
||||
assert started.event_id is not None
|
||||
|
||||
if completed:
|
||||
assert completed.event_id is not None
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_llm_events_have_parent(self, collector: EventCollector) -> None:
|
||||
"""LLM call events should have a parent event."""
|
||||
agent = Agent(
|
||||
role="Helper",
|
||||
goal="Help with tasks",
|
||||
backstory="You help.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'hi' and nothing else.",
|
||||
expected_output="The word hi.",
|
||||
agent=agent,
|
||||
)
|
||||
agent.execute_task(task)
|
||||
crewai_event_bus.flush()
|
||||
|
||||
llm_started = collector.first(LLMCallStartedEvent)
|
||||
|
||||
if llm_started:
|
||||
assert llm_started.event_id is not None
|
||||
# LLM events should have some parent in the hierarchy
|
||||
assert llm_started.parent_event_id is not None
|
||||
|
||||
|
||||
class TestFlowWithCrewEventOrdering:
|
||||
"""Tests for event ordering in flows containing crews."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_flow_events_have_ids(self, collector: EventCollector) -> None:
|
||||
"""Flow events should have event_ids."""
|
||||
agent = Agent(
|
||||
role="Worker",
|
||||
goal="Do work",
|
||||
backstory="You work.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'complete' and nothing else.",
|
||||
expected_output="The word complete.",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
class SimpleFlow(Flow):
|
||||
@start()
|
||||
def run_crew(self):
|
||||
c = Crew(agents=[agent], tasks=[task], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
flow = SimpleFlow()
|
||||
flow.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
flow_started = collector.first(FlowStartedEvent)
|
||||
flow_finished = collector.first(FlowFinishedEvent)
|
||||
|
||||
assert flow_started is not None
|
||||
assert flow_started.event_id is not None
|
||||
|
||||
assert flow_finished is not None
|
||||
assert flow_finished.event_id is not None
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_method_parent_is_flow(self, collector: EventCollector) -> None:
|
||||
"""Method execution events should have flow as parent."""
|
||||
agent = Agent(
|
||||
role="Worker",
|
||||
goal="Do work",
|
||||
backstory="You work.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'ready' and nothing else.",
|
||||
expected_output="The word ready.",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
class FlowWithMethod(Flow):
|
||||
@start()
|
||||
def my_method(self):
|
||||
c = Crew(agents=[agent], tasks=[task], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
flow = FlowWithMethod()
|
||||
flow.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
flow_started = collector.first(FlowStartedEvent)
|
||||
method_started = collector.first(MethodExecutionStartedEvent)
|
||||
|
||||
assert flow_started is not None
|
||||
assert method_started is not None
|
||||
assert method_started.parent_event_id == flow_started.event_id
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_crew_parent_is_method(self, collector: EventCollector) -> None:
|
||||
"""Crew inside flow method should have method as parent."""
|
||||
agent = Agent(
|
||||
role="Worker",
|
||||
goal="Do work",
|
||||
backstory="You work.",
|
||||
verbose=False,
|
||||
)
|
||||
task = Task(
|
||||
description="Say 'go' and nothing else.",
|
||||
expected_output="The word go.",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
class FlowWithCrew(Flow):
|
||||
@start()
|
||||
def run_it(self):
|
||||
c = Crew(agents=[agent], tasks=[task], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
flow = FlowWithCrew()
|
||||
flow.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
method_started = collector.first(MethodExecutionStartedEvent)
|
||||
crew_started = collector.first(CrewKickoffStartedEvent)
|
||||
|
||||
assert method_started is not None
|
||||
assert crew_started is not None
|
||||
assert crew_started.parent_event_id == method_started.event_id
|
||||
|
||||
|
||||
class TestFlowWithMultipleCrewsEventOrdering:
|
||||
"""Tests for event ordering in flows with multiple crews."""
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_two_crews_have_different_ids(self, collector: EventCollector) -> None:
|
||||
"""Two crews in a flow should have different event_ids."""
|
||||
agent1 = Agent(
|
||||
role="First",
|
||||
goal="Be first",
|
||||
backstory="You go first.",
|
||||
verbose=False,
|
||||
)
|
||||
agent2 = Agent(
|
||||
role="Second",
|
||||
goal="Be second",
|
||||
backstory="You go second.",
|
||||
verbose=False,
|
||||
)
|
||||
task1 = Task(
|
||||
description="Say '1' and nothing else.",
|
||||
expected_output="The number 1.",
|
||||
agent=agent1,
|
||||
)
|
||||
task2 = Task(
|
||||
description="Say '2' and nothing else.",
|
||||
expected_output="The number 2.",
|
||||
agent=agent2,
|
||||
)
|
||||
|
||||
class TwoCrewFlow(Flow):
|
||||
@start()
|
||||
def first(self):
|
||||
c = Crew(agents=[agent1], tasks=[task1], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
@listen(first)
|
||||
def second(self, _):
|
||||
c = Crew(agents=[agent2], tasks=[task2], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
flow = TwoCrewFlow()
|
||||
flow.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
crew_started_events = collector.all_of(CrewKickoffStartedEvent)
|
||||
|
||||
assert len(crew_started_events) >= 2
|
||||
assert crew_started_events[0].event_id != crew_started_events[1].event_id
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_second_crew_after_first(self, collector: EventCollector) -> None:
|
||||
"""Second crew should have higher sequence than first."""
|
||||
agent1 = Agent(
|
||||
role="First",
|
||||
goal="Be first",
|
||||
backstory="You go first.",
|
||||
verbose=False,
|
||||
)
|
||||
agent2 = Agent(
|
||||
role="Second",
|
||||
goal="Be second",
|
||||
backstory="You go second.",
|
||||
verbose=False,
|
||||
)
|
||||
task1 = Task(
|
||||
description="Say 'a' and nothing else.",
|
||||
expected_output="The letter a.",
|
||||
agent=agent1,
|
||||
)
|
||||
task2 = Task(
|
||||
description="Say 'b' and nothing else.",
|
||||
expected_output="The letter b.",
|
||||
agent=agent2,
|
||||
)
|
||||
|
||||
class SequentialCrewFlow(Flow):
|
||||
@start()
|
||||
def crew_a(self):
|
||||
c = Crew(agents=[agent1], tasks=[task1], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
@listen(crew_a)
|
||||
def crew_b(self, _):
|
||||
c = Crew(agents=[agent2], tasks=[task2], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
flow = SequentialCrewFlow()
|
||||
flow.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
crew_started_events = collector.all_of(CrewKickoffStartedEvent)
|
||||
|
||||
assert len(crew_started_events) >= 2
|
||||
first = crew_started_events[0]
|
||||
second = crew_started_events[1]
|
||||
|
||||
assert first.emission_sequence is not None
|
||||
assert second.emission_sequence is not None
|
||||
assert second.emission_sequence > first.emission_sequence
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_tasks_have_correct_crew_parents(self, collector: EventCollector) -> None:
|
||||
"""Tasks in different crews should have their own crew as parent."""
|
||||
agent1 = Agent(
|
||||
role="Alpha",
|
||||
goal="Do alpha work",
|
||||
backstory="You are alpha.",
|
||||
verbose=False,
|
||||
)
|
||||
agent2 = Agent(
|
||||
role="Beta",
|
||||
goal="Do beta work",
|
||||
backstory="You are beta.",
|
||||
verbose=False,
|
||||
)
|
||||
task1 = Task(
|
||||
description="Say 'alpha' and nothing else.",
|
||||
expected_output="The word alpha.",
|
||||
agent=agent1,
|
||||
)
|
||||
task2 = Task(
|
||||
description="Say 'beta' and nothing else.",
|
||||
expected_output="The word beta.",
|
||||
agent=agent2,
|
||||
)
|
||||
|
||||
class ParentTestFlow(Flow):
|
||||
@start()
|
||||
def alpha_crew(self):
|
||||
c = Crew(agents=[agent1], tasks=[task1], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
@listen(alpha_crew)
|
||||
def beta_crew(self, _):
|
||||
c = Crew(agents=[agent2], tasks=[task2], verbose=False)
|
||||
return c.kickoff()
|
||||
|
||||
flow = ParentTestFlow()
|
||||
flow.kickoff()
|
||||
crewai_event_bus.flush()
|
||||
|
||||
crew_started_events = collector.all_of(CrewKickoffStartedEvent)
|
||||
task_started_events = collector.all_of(TaskStartedEvent)
|
||||
|
||||
assert len(crew_started_events) >= 2
|
||||
assert len(task_started_events) >= 2
|
||||
|
||||
crew1_id = crew_started_events[0].event_id
|
||||
crew2_id = crew_started_events[1].event_id
|
||||
|
||||
task1_parent = task_started_events[0].parent_event_id
|
||||
task2_parent = task_started_events[1].parent_event_id
|
||||
|
||||
assert task1_parent == crew1_id
|
||||
assert task2_parent == crew2_id
|
||||
@@ -728,3 +728,39 @@ def test_google_streaming_returns_usage_metrics():
|
||||
assert result.token_usage.prompt_tokens > 0
|
||||
assert result.token_usage.completion_tokens > 0
|
||||
assert result.token_usage.successful_requests >= 1
|
||||
|
||||
|
||||
@pytest.mark.vcr()
def test_google_express_mode_works() -> None:
    """Test Google Vertex AI Express mode with API key authentication.

    Exercises the Vertex AI endpoint (aiplatform.googleapis.com) authenticated
    with an API key rather than service account credentials.
    """
    with patch.dict(os.environ, {"GOOGLE_GENAI_USE_VERTEXAI": "true"}):
        agent = Agent(
            role="Research Assistant",
            goal="Find information about the capital of Japan",
            backstory="You are a helpful research assistant.",
            llm=LLM(
                model="gemini/gemini-2.0-flash-exp",
            ),
            verbose=True,
        )

        task = Task(
            description="What is the capital of Japan?",
            expected_output="The capital of Japan",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        assert result.token_usage is not None
        assert result.token_usage.total_tokens > 0
        assert result.token_usage.prompt_tokens > 0
        assert result.token_usage.completion_tokens > 0
        assert result.token_usage.successful_requests >= 1
|
||||
|
||||
@@ -70,9 +70,6 @@ def test_long_term_memory_save_events(long_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": "test_agent",
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test_task",
|
||||
"metadata": {"task": "test_task", "quality": 0.5},
|
||||
}
|
||||
@@ -88,9 +85,6 @@ def test_long_term_memory_save_events(long_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": "test_agent",
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test_task",
|
||||
"metadata": {
|
||||
"task": "test_task",
|
||||
@@ -145,9 +139,6 @@ def test_long_term_memory_search_events(long_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": None,
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test query",
|
||||
"limit": 5,
|
||||
"score_threshold": None,
|
||||
@@ -165,9 +156,6 @@ def test_long_term_memory_search_events(long_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": None,
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": ANY,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test query",
|
||||
"results": None,
|
||||
"limit": 5,
|
||||
|
||||
@@ -81,9 +81,6 @@ def test_short_term_memory_search_events(short_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": None,
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test value",
|
||||
"limit": 3,
|
||||
"score_threshold": 0.35,
|
||||
@@ -101,9 +98,6 @@ def test_short_term_memory_search_events(short_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": None,
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"query": "test value",
|
||||
"results": [],
|
||||
"limit": 3,
|
||||
@@ -156,9 +150,6 @@ def test_short_term_memory_save_events(short_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": None,
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test value",
|
||||
"metadata": {"task": "test_task"},
|
||||
}
|
||||
@@ -175,9 +166,6 @@ def test_short_term_memory_save_events(short_term_memory):
|
||||
"from_agent": None,
|
||||
"agent_role": None,
|
||||
"agent_id": None,
|
||||
"event_id": ANY,
|
||||
"parent_event_id": None,
|
||||
"emission_sequence": ANY,
|
||||
"value": "test value",
|
||||
"metadata": {"task": "test_task"},
|
||||
"save_time_ms": ANY,
|
||||
|
||||