Compare commits

..

10 Commits

Author SHA1 Message Date
tonykipkemboi
e08d210bb8 docs(mintlify): remove OpenAPI Endpoints groups; add localized MDX endpoint pages for pt-BR and ko 2025-08-20 11:23:31 -04:00
tonykipkemboi
34100d290b docs(mintlify): add explicit endpoint MDX pages and include in nav; keep OpenAPI auto-gen as fallback 2025-08-20 11:13:05 -04:00
tonykipkemboi
46f8fa59c1 docs(mintlify): use explicit openapi {source, directory} with absolute paths to fix branch deployment routing 2025-08-20 11:07:09 -04:00
tonykipkemboi
4867dced0e docs: fix API Reference OpenAPI sources and redirects; clarify training data usage; add Mermaid diagram; correct CLI usage and notes 2025-08-20 10:51:51 -04:00
Greyson LaLonde
ed187b495b feat: centralize embedding types and create base client (#3246)
feat: add RAG system foundation with generic vector store support

- Add BaseClient protocol for vector stores
- Move BaseRAGStorage to rag/core
- Centralize embedding types in embeddings/types.py
- Remove unused storage models
2025-08-20 09:35:27 -04:00
Wajeeh ul Hassan
2773996b49 fix: revert pin openai<1.100.0 to openai>=1.13.3 (#3364) 2025-08-20 09:16:26 -04:00
Damian Silbergleith
95923b78c6 feat: display task name in verbose output (#3308)
* feat: display task name in verbose output

- Modified event_listener.py to pass task names to the formatter
- Updated console_formatter.py to display task names when available
- Maintains backward compatibility by showing UUID for tasks without names
- Makes verbose output more informative and readable

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: remove unnecessary f-string prefixes in console formatter

Remove extraneous f prefixes from string literals without placeholders
in console_formatter.py to resolve ruff F541 linting errors.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

---------

Co-authored-by: Claude <noreply@anthropic.com>
2025-08-20 08:43:05 -04:00
Lucas Gomide
7065ad4336 feat: adding additional parameter to Flow's start methods (#3356)
* feat: adding additional parameter to Flow's start methods

When the `crewai_trigger_payload` key is present in the Flow inputs, it is passed to the Flow's start methods as a parameter

* fix: support crewai_trigger_payload in async Flow start methods
2025-08-19 17:32:19 -04:00
Lorenze Jay
d6254918fd Lorenze/max retry defaults tools (#3362)
* feat: enhance BaseTool and CrewStructuredTool with usage tracking

This commit introduces a mechanism to track the usage count of tools within the CrewAI framework. The `BaseTool` class now includes a `_increment_usage_count` method that updates the current usage count, which is also reflected in the associated `CrewStructuredTool`. Additionally, a new test has been added to ensure that the maximum usage count is respected when invoking tools, enhancing the overall reliability and functionality of the tool system.

* feat: add max usage count feature to tools documentation

This commit introduces a new section in the tools overview documentation that explains the maximum usage count feature for tools within the CrewAI framework. Users can now set a limit on how many times a tool can be used, enhancing control over tool usage. An example of implementing the `FileReadTool` with a maximum usage count is also provided, improving the clarity and usability of the documentation.

* undo field string
2025-08-19 10:44:55 -07:00
Heitor Carvalho
95e3d6db7a fix: add 'tool' section migration when running crewai update (#3341)
2025-08-19 08:11:30 -04:00
29 changed files with 2026 additions and 48 deletions

View File

@@ -341,11 +341,12 @@
"groups": [
{
"group": "Getting Started",
"pages": ["en/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.en.yaml"
"pages": [
"en/api-reference/introduction",
"en/api-reference/inputs",
"en/api-reference/kickoff",
"en/api-reference/status"
]
}
]
},
@@ -680,11 +681,12 @@
"groups": [
{
"group": "Começando",
"pages": ["pt-BR/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.pt-BR.yaml"
"pages": [
"pt-BR/api-reference/introduction",
"pt-BR/api-reference/inputs",
"pt-BR/api-reference/kickoff",
"pt-BR/api-reference/status"
]
}
]
},
@@ -1026,11 +1028,12 @@
"groups": [
{
"group": "시작 안내",
"pages": ["ko/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.ko.yaml"
"pages": [
"ko/api-reference/introduction",
"ko/api-reference/inputs",
"ko/api-reference/kickoff",
"ko/api-reference/status"
]
}
]
},
@@ -1081,6 +1084,10 @@
"indexing": "all"
},
"redirects": [
{
"source": "/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/introduction",
"destination": "/en/introduction"
@@ -1133,6 +1140,18 @@
"source": "/api-reference/:path*",
"destination": "/en/api-reference/:path*"
},
{
"source": "/en/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/pt-BR/api-reference",
"destination": "/pt-BR/api-reference/introduction"
},
{
"source": "/ko/api-reference",
"destination": "/ko/api-reference/introduction"
},
{
"source": "/examples/:path*",
"destination": "/en/examples/:path*"

View File

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "Get required inputs for your crew"
openapi: "/enterprise-api.en.yaml GET /inputs"
---

View File

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "Start a crew execution"
openapi: "/enterprise-api.en.yaml POST /kickoff"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
---

View File

@@ -21,13 +21,17 @@ To use the training feature, follow these steps:
3. Run the following command:
```shell
crewai train -n <n_iterations> <filename> (optional)
crewai train -n <n_iterations> -f <filename.pkl>
```
<Tip>
Replace `<n_iterations>` with the desired number of training iterations and `<filename>` with the appropriate filename ending with `.pkl`.
</Tip>
### Training Your Crew Programmatically
<Note>
If you omit `-f`, the output defaults to `trained_agents_data.pkl` in the current working directory. You can pass an absolute path to control where the file is written.
</Note>
### Training your Crew programmatically
To train your crew programmatically, use the following steps:
@@ -51,19 +55,65 @@ except Exception as e:
raise Exception(f"An error occurred while training the crew: {e}")
```
### Key Points to Note
## How trained data is used by agents
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
CrewAI uses the training artifacts in two ways: during training to incorporate your human feedback, and after training to guide agents with consolidated suggestions.
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
### Training data flow
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
```mermaid
flowchart TD
A["Start training<br/>CLI: crewai train -n -f<br/>or Python: crew.train(...)"] --> B["Setup training mode<br/>- task.human_input = true<br/>- disable delegation<br/>- init training_data.pkl + trained file"]
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
subgraph "Iterations"
direction LR
C["Iteration i<br/>initial_output"] --> D["User human_feedback"]
D --> E["improved_output"]
E --> F["Append to training_data.pkl<br/>by agent_id and iteration"]
end
Happy training with CrewAI! 🚀
B --> C
F --> G{"More iterations?"}
G -- "Yes" --> C
G -- "No" --> H["Evaluate per agent<br/>aggregate iterations"]
H --> I["Consolidate<br/>suggestions[] + quality + final_summary"]
I --> J["Save by agent role to trained file<br/>(default: trained_agents_data.pkl)"]
J --> K["Normal (non-training) runs"]
K --> L["Auto-load suggestions<br/>from trained_agents_data.pkl"]
L --> M["Append to prompt<br/>for consistent improvements"]
```
### During training runs
- On each iteration, the system records for every agent:
- `initial_output`: the agent's first answer
- `human_feedback`: your inline feedback when prompted
- `improved_output`: the agent's follow-up answer after feedback
- This data is stored in a working file named `training_data.pkl`, keyed by the agent's internal ID and iteration.
- While training is active, the agent automatically appends your prior human feedback to its prompt to enforce those instructions on subsequent attempts within the training session.
Training is interactive: tasks set `human_input = true`, so running in a non-interactive environment will block on user input.
### After training completes
- When `train(...)` finishes, CrewAI evaluates the collected training data per agent and produces a consolidated result containing:
- `suggestions`: clear, actionable instructions distilled from your feedback and the difference between initial/improved outputs
- `quality`: a 0–10 score capturing improvement
- `final_summary`: a step-by-step set of action items for future tasks
- These consolidated results are saved to the filename you pass to `train(...)` (default via CLI is `trained_agents_data.pkl`). Entries are keyed by the agent's `role` so they can be applied across sessions.
- During normal (non-training) execution, each agent automatically loads its consolidated `suggestions` and appends them to the task prompt as mandatory instructions. This gives you consistent improvements without changing your agent definitions.
### File summary
- `training_data.pkl` (ephemeral, per-session):
- Structure: `agent_id -> { iteration_number: { initial_output, human_feedback, improved_output } }`
- Purpose: capture raw data and human feedback during training
- Location: saved in the current working directory (CWD)
- `trained_agents_data.pkl` (or your custom filename):
- Structure: `agent_role -> { suggestions: string[], quality: number, final_summary: string }`
- Purpose: persist consolidated guidance for future runs
- Location: written to the CWD by default; use `-f` to set a custom (including absolute) path
## Small Language Model Considerations
@@ -129,3 +179,18 @@ Happy training with CrewAI! 🚀
</Warning>
</Tab>
</Tabs>
### Key Points to Note
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
- Trained guidance is applied at prompt time; it does not modify your Python/YAML agent configuration.
- Agents automatically load trained suggestions from a file named `trained_agents_data.pkl` located in the current working directory. If you trained to a different filename, either rename it to `trained_agents_data.pkl` before running, or adjust the loader in code.
- You can change the output filename when calling `crewai train` with `-f/--filename`. Absolute paths are supported if you want to save outside the CWD.
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
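
For reference, a minimal sketch of the programmatic path described above; the `assemble_crew` helper and the `inputs` values are hypothetical, and the filename matches the documented default:

```python
from my_project.crew import assemble_crew  # hypothetical helper that returns a configured Crew

crew = assemble_crew()

try:
    # Consolidated suggestions land in the given .pkl (written to the CWD by default),
    # keyed by agent role, and are auto-loaded on later non-training runs.
    crew.train(
        n_iterations=2,
        filename="trained_agents_data.pkl",
        inputs={"topic": "AI agents"},
    )
except Exception as e:
    raise Exception(f"An error occurred while training the crew: {e}")
```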

View File

@@ -117,4 +117,19 @@ agent = Agent(
)
```
## **Max Usage Count**
You can set a maximum usage count for a tool to prevent it from being used more than a certain number of times.
By default, the max usage count is unlimited.
```python
from crewai_tools import FileReadTool
tool = FileReadTool(max_usage_count=5, ...)
```
Ready to explore? Pick a category above to discover tools that fit your use case!
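
Alongside the `FileReadTool` snippet, a hedged sketch of applying the same cap to a custom tool through the `tool` decorator, whose new `max_usage_count` keyword appears in the `base_tool.py` diff further down (the tool body is illustrative):

```python
from crewai.tools import tool

@tool("count_words", max_usage_count=3)
def count_words(text: str) -> int:
    """Count the words in a piece of text; usable at most three times per run."""
    return len(text.split())
```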

View File

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "크루가 필요로 하는 입력 확인"
openapi: "/enterprise-api.ko.yaml GET /inputs"
---

View File

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "크루 실행 시작"
openapi: "/enterprise-api.ko.yaml POST /kickoff"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "실행 상태 조회"
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "Obter entradas necessárias para sua crew"
openapi: "/enterprise-api.pt-BR.yaml GET /inputs"
---

View File

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "Iniciar a execução da crew"
openapi: "/enterprise-api.pt-BR.yaml POST /kickoff"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "Obter o status da execução"
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
---

View File

@@ -10,7 +10,7 @@ authors = [
dependencies = [
# Core Dependencies
"pydantic>=2.4.2",
"openai<1.100.0", # TODO: Temporarily pin due to https://github.com/openai/openai-python/issues/2564. Can revert once the issue is fixed.
"openai>=1.13.3",
"litellm==1.74.9",
"instructor>=1.3.3",
# Text Processing

View File

@@ -44,8 +44,9 @@ def migrate_pyproject(input_file, output_file):
]
new_pyproject["project"]["requires-python"] = poetry_data.get("python")
else:
# If it's already in the new format, just copy the project section
# If it's already in the new format, just copy the project and tool sections
new_pyproject["project"] = pyproject_data.get("project", {})
new_pyproject["tool"] = pyproject_data.get("tool", {})
# Migrate or copy dependencies
if "dependencies" in new_pyproject["project"]:

View File

@@ -913,17 +913,52 @@ class Flow(Generic[T], metaclass=FlowMeta):
- Triggers execution of any listeners waiting on this start method
- Part of the flow's initialization sequence
- Skips execution if method was already completed (e.g., after reload)
- Automatically injects crewai_trigger_payload if available in flow inputs
"""
if start_method_name in self._completed_methods:
last_output = self._method_outputs[-1] if self._method_outputs else None
await self._execute_listeners(start_method_name, last_output)
return
method = self._methods[start_method_name]
enhanced_method = self._inject_trigger_payload_for_start_method(method)
result = await self._execute_method(
start_method_name, self._methods[start_method_name]
start_method_name, enhanced_method
)
await self._execute_listeners(start_method_name, result)
def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable:
def prepare_kwargs(*args, **kwargs):
inputs = baggage.get_baggage("flow_inputs") or {}
trigger_payload = inputs.get("crewai_trigger_payload")
sig = inspect.signature(original_method)
accepts_trigger_payload = "crewai_trigger_payload" in sig.parameters
if trigger_payload is not None and accepts_trigger_payload:
kwargs["crewai_trigger_payload"] = trigger_payload
elif trigger_payload is not None:
self._log_flow_event(
f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
color="yellow"
)
return args, kwargs
if asyncio.iscoroutinefunction(original_method):
async def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return await original_method(*args, **kwargs)
else:
def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return original_method(*args, **kwargs)
enhanced_method.__name__ = original_method.__name__
enhanced_method.__doc__ = original_method.__doc__
return enhanced_method
async def _execute_method(
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
) -> Any:
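
To make the injection concrete, a minimal sketch of a Flow whose start method opts in to the payload; the class and payload contents are illustrative, and `crewai_trigger_payload` inside `kickoff(inputs=...)` is the key the new `_inject_trigger_payload_for_start_method` looks for:

```python
from crewai.flow.flow import Flow, start


class TriggeredFlow(Flow):
    @start()
    def begin(self, crewai_trigger_payload: dict | None = None):
        # If the payload is present in the flow inputs it is injected here;
        # start methods without this parameter simply log a warning and run as before.
        return {"received": crewai_trigger_payload}


flow = TriggeredFlow()
result = flow.kickoff(inputs={"crewai_trigger_payload": {"order_id": 42}})
```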

View File

@@ -0,0 +1 @@
"""Core abstract base classes and protocols for RAG systems."""

View File

@@ -0,0 +1,433 @@
"""Protocol for vector database client implementations."""
from abc import abstractmethod
from typing import Any, Protocol, runtime_checkable, TypedDict, Annotated
from typing_extensions import Unpack, Required
from crewai.rag.types import (
EmbeddingFunction,
BaseRecord,
SearchResult,
)
class BaseCollectionParams(TypedDict):
"""Base parameters for collection operations.
Attributes:
collection_name: The name of the collection/index to operate on.
"""
collection_name: Required[
Annotated[
str,
"Name of the collection/index. Implementations may have specific constraints (e.g., character limits, allowed characters, case sensitivity).",
]
]
class BaseCollectionAddParams(BaseCollectionParams):
"""Parameters for adding documents to a collection.
Extends BaseCollectionParams with document-specific fields.
Attributes:
collection_name: The name of the collection to add documents to.
documents: List of BaseRecord dictionaries containing document data.
"""
documents: list[BaseRecord]
class BaseCollectionSearchParams(BaseCollectionParams, total=False):
"""Parameters for searching within a collection.
Extends BaseCollectionParams with search-specific optional fields.
All fields except collection_name and query are optional.
Attributes:
query: The text query to search for (required).
limit: Maximum number of results to return.
metadata_filter: Filter results by metadata fields.
score_threshold: Minimum similarity score for results (0-1).
"""
query: Required[str]
limit: int
metadata_filter: dict[str, Any]
score_threshold: float
@runtime_checkable
class BaseClient(Protocol):
"""Protocol for vector store client implementations.
This protocol defines the interface that all vector store client implementations
must follow. It provides a consistent API for storing and retrieving
documents with their vector embeddings across different vector database
backends (e.g., Qdrant, ChromaDB, Weaviate). Implementing classes should
handle connection management, data persistence, and vector similarity
search operations specific to their backend.
Implementation Guidelines:
Implementations should accept BaseClientParams in their constructor to allow
passing pre-configured client instances:
class MyVectorClient:
def __init__(self, client: Any | None = None, **kwargs):
if client:
self.client = client
else:
self.client = self._create_default_client(**kwargs)
Notes:
This protocol replaces the former BaseRAGStorage abstraction,
providing a cleaner interface for vector store operations.
Attributes:
embedding_function: Callable that takes a list of text strings
and returns a list of embedding vectors. Implementations
should always provide a default embedding function.
client: The underlying vector database client instance. This could be
passed via BaseClientParams during initialization or created internally.
"""
client: Any
embedding_function: EmbeddingFunction
@abstractmethod
def create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
"""Create a new collection/index in the vector database.
Keyword Args:
collection_name: The name of the collection to create. Must be unique within
the vector database instance.
Raises:
ValueError: If collection name already exists.
ConnectionError: If unable to connect to the vector database backend.
"""
...
@abstractmethod
async def acreate_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
"""Create a new collection/index in the vector database asynchronously.
Keyword Args:
collection_name: The name of the collection to create. Must be unique within
the vector database instance.
Raises:
ValueError: If collection name already exists.
ConnectionError: If unable to connect to the vector database backend.
"""
...
@abstractmethod
def get_or_create_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> Any:
"""Get an existing collection or create it if it doesn't exist.
This method provides a convenient way to ensure a collection exists
without having to check for its existence first.
Keyword Args:
collection_name: The name of the collection to get or create.
Returns:
A collection object whose type depends on the backend implementation.
This could be a collection reference, ID, or client object.
Raises:
ValueError: If unable to create the collection.
ConnectionError: If unable to connect to the vector database backend.
"""
...
@abstractmethod
async def aget_or_create_collection(
self, **kwargs: Unpack[BaseCollectionParams]
) -> Any:
"""Get an existing collection or create it if it doesn't exist asynchronously.
Keyword Args:
collection_name: The name of the collection to get or create.
Returns:
A collection object whose type depends on the backend implementation.
Raises:
ValueError: If unable to create the collection.
ConnectionError: If unable to connect to the vector database backend.
"""
...
@abstractmethod
def add_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
"""Add documents with their embeddings to a collection.
This method performs an upsert operation - if a document with the same ID
already exists, it will be updated with the new content and metadata.
Implementations should handle embedding generation internally based on
the configured embedding function.
Keyword Args:
collection_name: The name of the collection to add documents to.
documents: List of BaseRecord dicts containing:
- content: The text content (required)
- doc_id: Optional unique identifier (auto-generated from content hash if missing)
- metadata: Optional metadata dictionary
Embeddings will be generated automatically.
Raises:
ValueError: If collection doesn't exist or documents list is empty.
TypeError: If documents are not BaseRecord dict instances.
ConnectionError: If unable to connect to the vector database backend.
Example:
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>> from crewai.rag.types import BaseRecord
>>> client = ChromaDBClient()
>>>
>>> records: list[BaseRecord] = [
... {
... "content": "Machine learning basics",
... "metadata": {"source": "file3", "topic": "ML"}
... },
... {
... "doc_id": "custom_id",
... "content": "Deep learning fundamentals",
... "metadata": {"source": "file4", "topic": "DL"}
... }
... ]
>>> client.add_documents(collection_name="my_docs", documents=records)
>>>
>>> records_with_id: list[BaseRecord] = [
... {
... "doc_id": "nlp_001",
... "content": "Advanced NLP techniques",
... "metadata": {"source": "file5", "topic": "NLP"}
... }
... ]
>>> client.add_documents(collection_name="my_docs", documents=records_with_id)
"""
...
@abstractmethod
async def aadd_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
"""Add documents with their embeddings to a collection asynchronously.
Implementations should handle embedding generation internally based on
the configured embedding function.
Keyword Args:
collection_name: The name of the collection to add documents to.
documents: List of BaseRecord dicts containing:
- content: The text content (required)
- doc_id: Optional unique identifier (auto-generated from content hash if missing)
- metadata: Optional metadata dictionary
Embeddings will be generated automatically.
Raises:
ValueError: If collection doesn't exist or documents list is empty.
TypeError: If documents are not BaseRecord dict instances.
ConnectionError: If unable to connect to the vector database backend.
Example:
>>> import asyncio
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>> from crewai.rag.types import BaseRecord
>>>
>>> async def add_documents():
... client = ChromaDBClient()
...
... records: list[BaseRecord] = [
... {
... "doc_id": "doc2",
... "content": "Async operations in Python",
... "metadata": {"source": "file2", "topic": "async"}
... }
... ]
... await client.aadd_documents(collection_name="my_docs", documents=records)
...
>>> asyncio.run(add_documents())
"""
...
@abstractmethod
def search(
self, **kwargs: Unpack[BaseCollectionSearchParams]
) -> list[SearchResult]:
"""Search for similar documents using a query.
Performs a vector similarity search to find the most similar documents
to the provided query.
Keyword Args:
collection_name: The name of the collection to search in.
query: The text query to search for. The implementation handles
embedding generation internally.
limit: Maximum number of results to return. Defaults to 10.
metadata_filter: Optional metadata filter to apply to the search. The exact
format depends on the backend, but typically supports equality
and range queries on metadata fields.
score_threshold: Optional minimum similarity score threshold. Only
results with scores >= this threshold will be returned. The
score interpretation depends on the distance metric used.
Returns:
A list of SearchResult dictionaries ordered by similarity score in
descending order. Each result contains:
- id: Document ID
- content: Document text content
- metadata: Document metadata
- score: Similarity score (0-1, higher is better)
Raises:
ValueError: If collection doesn't exist.
ConnectionError: If unable to connect to the vector database backend.
Example:
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>> client = ChromaDBClient()
>>>
>>> results = client.search(
... collection_name="my_docs",
... query="What is machine learning?",
... limit=5,
... metadata_filter={"source": "file1"},
... score_threshold=0.7
... )
>>> for result in results:
... print(f"{result['id']}: {result['score']:.2f}")
"""
...
@abstractmethod
async def asearch(
self, **kwargs: Unpack[BaseCollectionSearchParams]
) -> list[SearchResult]:
"""Search for similar documents using a query asynchronously.
Keyword Args:
collection_name: The name of the collection to search in.
query: The text query to search for. The implementation handles
embedding generation internally.
limit: Maximum number of results to return. Defaults to 10.
metadata_filter: Optional metadata filter to apply to the search.
score_threshold: Optional minimum similarity score threshold.
Returns:
A list of SearchResult dictionaries ordered by similarity score.
Raises:
ValueError: If collection doesn't exist.
ConnectionError: If unable to connect to the vector database backend.
Example:
>>> import asyncio
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>>
>>> async def search_documents():
... client = ChromaDBClient()
... results = await client.asearch(
... collection_name="my_docs",
... query="Python programming best practices",
... limit=5,
... metadata_filter={"source": "file1"},
... score_threshold=0.7
... )
... for result in results:
... print(f"{result['id']}: {result['score']:.2f}")
...
>>> asyncio.run(search_documents())
"""
...
@abstractmethod
def delete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
"""Delete a collection and all its data.
This operation is irreversible and will permanently remove all documents,
embeddings, and metadata associated with the collection.
Keyword Args:
collection_name: The name of the collection to delete.
Raises:
ValueError: If the collection doesn't exist.
ConnectionError: If unable to connect to the vector database backend.
Example:
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>> client = ChromaDBClient()
>>> client.delete_collection(collection_name="old_docs")
>>> print("Collection 'old_docs' deleted successfully")
"""
...
@abstractmethod
async def adelete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
"""Delete a collection and all its data asynchronously.
Keyword Args:
collection_name: The name of the collection to delete.
Raises:
ValueError: If the collection doesn't exist.
ConnectionError: If unable to connect to the vector database backend.
Example:
>>> import asyncio
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>>
>>> async def delete_old_collection():
... client = ChromaDBClient()
... await client.adelete_collection(collection_name="old_docs")
... print("Collection 'old_docs' deleted successfully")
...
>>> asyncio.run(delete_old_collection())
"""
...
@abstractmethod
def reset(self) -> None:
"""Reset the vector database by deleting all collections and data.
This method provides a way to completely clear the vector database,
removing all collections and their contents. Use with caution as
this operation is irreversible.
Raises:
ConnectionError: If unable to connect to the vector database backend.
PermissionError: If the operation is not allowed by the backend.
Example:
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>> client = ChromaDBClient()
>>> client.reset()
>>> print("Vector database completely reset - all data deleted")
"""
...
@abstractmethod
async def areset(self) -> None:
"""Reset the vector database by deleting all collections and data asynchronously.
Raises:
ConnectionError: If unable to connect to the vector database backend.
PermissionError: If the operation is not allowed by the backend.
Example:
>>> import asyncio
>>> from crewai.rag.chromadb.client import ChromaDBClient
>>>
>>> async def reset_database():
... client = ChromaDBClient()
... await client.areset()
... print("Vector database completely reset - all data deleted")
...
>>> asyncio.run(reset_database())
"""
...

View File

@@ -0,0 +1,30 @@
"""Base provider protocol for vector database client creation."""
from abc import ABC
from typing import Any, Protocol, runtime_checkable, Union
from pydantic import BaseModel, Field
from crewai.rag.types import EmbeddingFunction
from crewai.rag.embeddings.types import EmbeddingOptions
class BaseProviderOptions(BaseModel, ABC):
"""Base configuration for all provider options."""
client_type: str = Field(..., description="Type of client to create")
embedding_config: Union[EmbeddingOptions, EmbeddingFunction, None] = Field(
default=None,
description="Embedding configuration - either options for built-in providers or a custom function",
)
options: Any = Field(
default=None, description="Additional provider-specific options"
)
@runtime_checkable
class BaseProvider(Protocol):
"""Protocol for vector database client providers."""
def __call__(self, options: BaseProviderOptions) -> Any:
"""Create and return a configured client instance."""
...

View File

@@ -0,0 +1,148 @@
"""Minimal embedding function factory for CrewAI."""
import os
from chromadb import EmbeddingFunction
from chromadb.utils.embedding_functions.amazon_bedrock_embedding_function import (
AmazonBedrockEmbeddingFunction,
)
from chromadb.utils.embedding_functions.cohere_embedding_function import (
CohereEmbeddingFunction,
)
from chromadb.utils.embedding_functions.google_embedding_function import (
GooglePalmEmbeddingFunction,
GoogleGenerativeAiEmbeddingFunction,
GoogleVertexEmbeddingFunction,
)
from chromadb.utils.embedding_functions.huggingface_embedding_function import (
HuggingFaceEmbeddingFunction,
)
from chromadb.utils.embedding_functions.instructor_embedding_function import (
InstructorEmbeddingFunction,
)
from chromadb.utils.embedding_functions.jina_embedding_function import (
JinaEmbeddingFunction,
)
from chromadb.utils.embedding_functions.ollama_embedding_function import (
OllamaEmbeddingFunction,
)
from chromadb.utils.embedding_functions.onnx_mini_lm_l6_v2 import ONNXMiniLM_L6_V2
from chromadb.utils.embedding_functions.open_clip_embedding_function import (
OpenCLIPEmbeddingFunction,
)
from chromadb.utils.embedding_functions.openai_embedding_function import (
OpenAIEmbeddingFunction,
)
from chromadb.utils.embedding_functions.roboflow_embedding_function import (
RoboflowEmbeddingFunction,
)
from chromadb.utils.embedding_functions.sentence_transformer_embedding_function import (
SentenceTransformerEmbeddingFunction,
)
from chromadb.utils.embedding_functions.text2vec_embedding_function import (
Text2VecEmbeddingFunction,
)
from crewai.rag.embeddings.types import EmbeddingOptions
def get_embedding_function(
config: EmbeddingOptions | dict | None = None,
) -> EmbeddingFunction:
"""Get embedding function - delegates to ChromaDB.
Args:
config: Optional configuration - either an EmbeddingOptions object or a dict with:
- provider: The embedding provider to use (default: "openai")
- Any other provider-specific parameters
Returns:
EmbeddingFunction instance ready for use with ChromaDB
Supported providers:
- openai: OpenAI embeddings (default)
- cohere: Cohere embeddings
- ollama: Ollama local embeddings
- huggingface: HuggingFace embeddings
- sentence-transformer: Local sentence transformers
- instructor: Instructor embeddings for specialized tasks
- google-palm: Google PaLM embeddings
- google-generativeai: Google Generative AI embeddings
- google-vertex: Google Vertex AI embeddings
- amazon-bedrock: AWS Bedrock embeddings
- jina: Jina AI embeddings
- roboflow: Roboflow embeddings for vision tasks
- openclip: OpenCLIP embeddings for multimodal tasks
- text2vec: Text2Vec embeddings
- onnx: ONNX MiniLM-L6-v2 (no API key needed, included with ChromaDB)
Examples:
# Use default OpenAI with retry logic
>>> embedder = get_embedding_function()
# Use Cohere with dict
>>> embedder = get_embedding_function({
... "provider": "cohere",
... "api_key": "your-key",
... "model_name": "embed-english-v3.0"
... })
# Use with EmbeddingOptions
>>> embedder = get_embedding_function(
... EmbeddingOptions(provider="sentence-transformer", model_name="all-MiniLM-L6-v2")
... )
# Use local sentence transformers (no API key needed)
>>> embedder = get_embedding_function({
... "provider": "sentence-transformer",
... "model_name": "all-MiniLM-L6-v2"
... })
# Use Ollama for local embeddings
>>> embedder = get_embedding_function({
... "provider": "ollama",
... "model_name": "nomic-embed-text"
... })
# Use ONNX (no API key needed)
>>> embedder = get_embedding_function({
... "provider": "onnx"
... })
"""
if config is None:
return OpenAIEmbeddingFunction(
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
)
# Handle EmbeddingOptions object
if isinstance(config, EmbeddingOptions):
config_dict = config.model_dump(exclude_none=True)
else:
config_dict = config.copy()
provider = config_dict.pop("provider", "openai")
embedding_functions = {
"openai": OpenAIEmbeddingFunction,
"cohere": CohereEmbeddingFunction,
"ollama": OllamaEmbeddingFunction,
"huggingface": HuggingFaceEmbeddingFunction,
"sentence-transformer": SentenceTransformerEmbeddingFunction,
"instructor": InstructorEmbeddingFunction,
"google-palm": GooglePalmEmbeddingFunction,
"google-generativeai": GoogleGenerativeAiEmbeddingFunction,
"google-vertex": GoogleVertexEmbeddingFunction,
"amazon-bedrock": AmazonBedrockEmbeddingFunction,
"jina": JinaEmbeddingFunction,
"roboflow": RoboflowEmbeddingFunction,
"openclip": OpenCLIPEmbeddingFunction,
"text2vec": Text2VecEmbeddingFunction,
"onnx": ONNXMiniLM_L6_V2,
}
if provider not in embedding_functions:
raise ValueError(
f"Unsupported provider: {provider}. "
f"Available providers: {list(embedding_functions.keys())}"
)
return embedding_functions[provider](**config_dict)

View File

@@ -0,0 +1,62 @@
"""Type definitions for the embeddings module."""
from typing import Literal
from pydantic import BaseModel, Field, SecretStr
from crewai.rag.types import EmbeddingFunction
EmbeddingProvider = Literal[
"openai",
"cohere",
"ollama",
"huggingface",
"sentence-transformer",
"instructor",
"google-palm",
"google-generativeai",
"google-vertex",
"amazon-bedrock",
"jina",
"roboflow",
"openclip",
"text2vec",
"onnx",
]
"""Supported embedding providers.
These correspond to the embedding functions available in ChromaDB's
embedding_functions module. Each provider has specific requirements
and configuration options.
"""
class EmbeddingOptions(BaseModel):
"""Configuration options for embedding providers.
Generic attributes that can be passed to get_embedding_function
to configure various embedding providers.
"""
provider: EmbeddingProvider = Field(
..., description="Embedding provider name (e.g., 'openai', 'cohere', 'onnx')"
)
model_name: str | None = Field(
default=None, description="Model name for the embedding provider"
)
api_key: SecretStr | None = Field(
default=None, description="API key for the embedding provider"
)
class EmbeddingConfig(BaseModel):
"""Configuration wrapper for embedding functions.
Accepts either a pre-configured EmbeddingFunction or EmbeddingOptions
to create one. This provides flexibility in how embeddings are configured.
Attributes:
function: Either a callable EmbeddingFunction or EmbeddingOptions to create one
"""
function: EmbeddingFunction | EmbeddingOptions

src/crewai/rag/types.py (new file, 50 lines)
View File

@@ -0,0 +1,50 @@
"""Type definitions for RAG (Retrieval-Augmented Generation) systems."""
from collections.abc import Callable, Mapping
from typing import TypeAlias, TypedDict, Any
from typing_extensions import Required
class BaseRecord(TypedDict, total=False):
"""A typed dictionary representing a document record.
Attributes:
doc_id: Optional unique identifier for the document. If not provided,
a content-based ID will be generated using SHA256 hash.
content: The text content of the document (required)
metadata: Optional metadata associated with the document
"""
doc_id: str
content: Required[str]
metadata: (
Mapping[str, str | int | float | bool]
| list[Mapping[str, str | int | float | bool]]
)
DenseVector: TypeAlias = list[float]
IntVector: TypeAlias = list[int]
EmbeddingFunction: TypeAlias = Callable[..., Any]
class SearchResult(TypedDict):
"""Standard search result format for vector store queries.
This provides a consistent interface for search results across different
vector store implementations. Each implementation should convert their
native result format to this standard format.
Attributes:
id: Unique identifier of the document
content: The text content of the document
metadata: Optional metadata associated with the document
score: Similarity score (higher is better, typically between 0 and 1)
"""
id: str
content: str
metadata: dict[str, Any]
score: float
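
A short sketch of how these typed dicts are meant to be populated and read; the values are illustrative and the shapes follow the definitions above:

```python
from crewai.rag.types import BaseRecord, SearchResult

record: BaseRecord = {
    "content": "CrewAI supports pluggable vector stores.",  # required field
    "metadata": {"source": "docs", "topic": "rag"},         # optional
    # "doc_id" omitted: a content-hash ID is generated instead
}

result: SearchResult = {
    "id": "doc_1",
    "content": record["content"],
    "metadata": {"source": "docs", "topic": "rag"},
    "score": 0.87,
}
print(f"{result['id']}: {result['score']:.2f}")
```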

View File

@@ -14,12 +14,14 @@ from pydantic import BaseModel as PydanticBaseModel
from crewai.tools.structured_tool import CrewStructuredTool
class EnvVar(BaseModel):
name: str
description: str
required: bool = True
default: Optional[str] = None
class BaseTool(BaseModel, ABC):
class _ArgsSchemaPlaceholder(PydanticBaseModel):
pass
@@ -108,7 +110,7 @@ class BaseTool(BaseModel, ABC):
def to_structured_tool(self) -> CrewStructuredTool:
"""Convert this tool to a CrewStructuredTool instance."""
self._set_args_schema()
return CrewStructuredTool(
structured_tool = CrewStructuredTool(
name=self.name,
description=self.description,
args_schema=self.args_schema,
@@ -117,6 +119,8 @@ class BaseTool(BaseModel, ABC):
max_usage_count=self.max_usage_count,
current_usage_count=self.current_usage_count,
)
structured_tool._original_tool = self
return structured_tool
@classmethod
def from_langchain(cls, tool: Any) -> "BaseTool":
@@ -276,7 +280,9 @@ def to_langchain(
return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]
def tool(*args, result_as_answer: bool = False, max_usage_count: int | None = None) -> Callable:
def tool(
*args, result_as_answer: bool = False, max_usage_count: int | None = None
) -> Callable:
"""
Decorator to create a tool from a function.

View File

@@ -10,6 +10,11 @@ from pydantic import BaseModel, Field, create_model
from crewai.utilities.logger import Logger
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from crewai.tools.base_tool import BaseTool
class CrewStructuredTool:
"""A structured tool that can operate on any number of inputs.
@@ -18,6 +23,8 @@ class CrewStructuredTool:
that integrates better with CrewAI's ecosystem.
"""
_original_tool: BaseTool | None = None
def __init__(
self,
name: str,
@@ -47,6 +54,7 @@ class CrewStructuredTool:
self.result_as_answer = result_as_answer
self.max_usage_count = max_usage_count
self.current_usage_count = current_usage_count
self._original_tool = None
# Validate the function signature matches the schema
self._validate_function_signature()
@@ -219,6 +227,8 @@ class CrewStructuredTool:
"""
parsed_args = self._parse_args(input)
self._increment_usage_count()
if inspect.iscoroutinefunction(self.func):
return await self.func(**parsed_args, **kwargs)
else:
@@ -242,6 +252,8 @@ class CrewStructuredTool:
"""Main method for tool execution."""
parsed_args = self._parse_args(input)
self._increment_usage_count()
if inspect.iscoroutinefunction(self.func):
result = asyncio.run(self.func(**parsed_args, **kwargs))
return result
@@ -253,6 +265,12 @@ class CrewStructuredTool:
return result
def _increment_usage_count(self) -> None:
"""Increment the usage count."""
self.current_usage_count += 1
if self._original_tool is not None:
self._original_tool.current_usage_count = self.current_usage_count
@property
def args(self) -> dict:
"""Get the tool's input arguments schema."""

View File

@@ -161,8 +161,10 @@ class EventListener(BaseEventListener):
def on_task_started(source, event: TaskStartedEvent):
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
self.execution_spans[source] = span
# Pass both task ID and task name (if set)
task_name = source.name if hasattr(source, 'name') and source.name else None
self.formatter.create_task_branch(
self.formatter.current_crew_tree, source.id
self.formatter.current_crew_tree, source.id, task_name
)
@crewai_event_bus.on(TaskCompletedEvent)
@@ -173,11 +175,14 @@ class EventListener(BaseEventListener):
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
# Pass task name if it exists
task_name = source.name if hasattr(source, 'name') and source.name else None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"completed",
task_name
)
@crewai_event_bus.on(TaskFailedEvent)
@@ -188,11 +193,14 @@ class EventListener(BaseEventListener):
self._telemetry.task_ended(span, source, source.agent.crew)
self.execution_spans[source] = None
# Pass task name if it exists
task_name = source.name if hasattr(source, 'name') and source.name else None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"failed",
task_name
)
# ----------- AGENT EVENTS -----------

View File

@@ -220,14 +220,22 @@ class ConsoleFormatter:
return tree
def create_task_branch(
self, crew_tree: Optional[Tree], task_id: str
self, crew_tree: Optional[Tree], task_id: str, task_name: Optional[str] = None
) -> Optional[Tree]:
"""Create and initialize a task branch."""
if not self.verbose:
return None
task_content = Text()
task_content.append(f"📋 Task: {task_id}", style="yellow bold")
# Display task name if available, otherwise just the ID
if task_name:
task_content.append("📋 Task: ", style="yellow bold")
task_content.append(f"{task_name}", style="yellow bold")
task_content.append(f" (ID: {task_id})", style="yellow dim")
else:
task_content.append(f"📋 Task: {task_id}", style="yellow bold")
task_content.append("\nStatus: ", style="white")
task_content.append("Executing Task...", style="yellow dim")
@@ -251,6 +259,7 @@ class ConsoleFormatter:
task_id: str,
agent_role: str,
status: str = "completed",
task_name: Optional[str] = None,
) -> None:
"""Update task status in the tree."""
if not self.verbose or crew_tree is None:
@@ -270,8 +279,13 @@ class ConsoleFormatter:
if str(task_id) in str(branch.label):
# Build label without introducing stray blank lines
task_content = Text()
# First line: Task ID
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
# First line: Task ID/name
if task_name:
task_content.append("📋 Task: ", style=f"{style} bold")
task_content.append(f"{task_name}", style=f"{style} bold")
task_content.append(f" (ID: {task_id})", style=f"{style} dim")
else:
task_content.append(f"📋 Task: {task_id}", style=f"{style} bold")
# Second line: Assigned to
task_content.append("\nAssigned to: ", style="white")
@@ -285,8 +299,9 @@ class ConsoleFormatter:
break
# Show status panel
display_name = task_name if task_name else str(task_id)
content = self.create_status_content(
f"Task {status.title()}", str(task_id), style, Agent=agent_role
f"Task {status.title()}", display_name, style, Agent=agent_role
)
self.print_panel(content, panel_title, style)
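
The display change is driven entirely by the optional `name` on a task; a minimal sketch (agent and task contents are illustrative, `verbose=True` turns on the tree output that now shows the name alongside the ID):

```python
from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="Keeps answers short.",
)

summarize = Task(
    name="Summarize topic",  # rendered as "📋 Task: Summarize topic (ID: ...)"
    description="Summarize {topic} in two sentences.",
    expected_output="A two-sentence summary.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[summarize], verbose=True)
crew.kickoff(inputs={"topic": "vector databases"})
```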

View File

@@ -0,0 +1,845 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1452'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//hFPRjtMwEHzPV6z83J5ySVvu8nYCIZ1ACKSCEOSUOs4mMefYlr2Boqr/
jpy0TQ7uxIsV7exMZmftQwTAZMUyYKLlJDqrlq837zbXn/flx6/fVPIp/fI+3ZbaxVv568ObR7YI
DFP+QEFn1pUwnVVI0ugRFg45YVC9frVeb5I0WacD0JkKVaA1lpYrs0ziZLWMb5bx5kRsjRToWQbf
IwCAw3AGi7rCPcsgXpwrHXrPG2TZpQmAOaNChXHvpSeuiS0mUBhNqAfXu90u19vW9E1LGdyDRqyA
DAiuFFCLIAkdJ6kbIGPGUi2dJyDZ4VWu70SYNpv6itB3rsO9tj1lcMiZDF8F4Z5ylkHO3g4qJ5rR
OTvOLTqse89DQrpXagZwrQ0NjCGchxNyvMShTGOdKf1fVFZLLX1bOOTe6DC6J2PZgB4jgIch9v5J
ksw601kqyDzi8Lvk9nbUY9OiJzRdn0AyxNWsnq4Wz+gVFRKXys8WxwQXLVYTddoy7ytpZkA0m/pf
N89pj5OPG/qv/AQIgZawKqzDSoqnE09tDsM7eKntkvJgmHl0P6XAgiS6sIkKa96r8Yoy/9sTdkUt
dYPOOjne09oWq01Z1zHG4oZFx+gPAAAA//8DAHvlcCKwAwAA
headers:
CF-RAY:
- 971b3f72effa6897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:35 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
path=/; expires=Tue, 19-Aug-25 17:37:35 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2564'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2751'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999674'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999674'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d654df1116aa42ca8ee7d10b4b424303
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1673'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4RTTW/bMAy9+1cQOidFlg8v8G1LB6zosbvNhaNItK3NFjWJ7loE+e+D7CR2uw67
CAIf3xP5SB0TAGG0yECoWrJqXTPfpffp2tWqu93u7m+Xu+3Dl6/m96+X+pB+VmIWGXT4gYovrBtF
rWuQDdkBVh4lY1T98HGzSZer5SbtgZY0NpFWOZ6vab5cLNfzxXa+SM/EmozCIDL4ngAAHPszlmg1
PosMFrNLpMUQZIUiuyYBCE9NjAgZggksLYvZCCqyjLaver/f5/ZbTV1VcwZ3UMsnhHMXqIFrhNL4
wGAYvYyNgbQaLEaQwHlS52tMDajIaiCLN7n9pGJ6dmHaqmCi5hKHO+s6zuCYCxNvBeMz5yKDXDwM
KtcXc3GaVu+x7IKM5tmuaSaAtJa4Z/S+PZ6R09Wphirn6RDeUEVprAl14VEGstGVwOREj54SgMd+
It0rk4Xz1DoumH5i/9xqvRn0xLgDE3R7BplYNtP4avaOXqGRpWnCZKZCSVWjHqnjAshOG5oAyaTr
v6t5T3vofBjRf+VHQCl0jLpwHrVRrzse0zzGL/KvtKvLfcEioH8yCgs26OMkNJaya4btFeElMLZF
aWyF3nkzrHDpinV6KMsFLtRWJKfkDwAAAP//AwAMYddzywMAAA==
headers:
CF-RAY:
- 971b3f84cf146897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:38 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1900'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '2551'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999629'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_f4181fe581264993ac5c6deba4f1c287
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '1922'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//hFNNb9swDL37VxA6J0XmfCz1bRswNBh2GXrYsBSOItG2ElsUJLrIUOS/
D7KT2N067CIIfHxP5CP1kgAIo0UGQlWSVePq6afVl9Xq9HAI3z6uj8eHz6os9OGrvt98T9sfYhIZ
tD+g4ivrTlHjamRDtoeVR8kYVd+9Xy5X6TxdrjugIY11pJWOpwuaprN0MZ2tp7PVhViRURhEBj8T
AICX7owlWo0nkcFsco00GIIsUWS3JADhqY4RIUMwgaVlMRlARZbRdlXvdrutfayoLSvOYAOVfEa4
dIEauEIIqMhqMIxexs5AWg0WI0rgPKnLNeZyZbwGsni3tR9UzM6uRFvmTFRf47CxruUMXrbCxFvO
eOKtyGArHjuR23tbcR4X77Fog4ze2bauR4C0lrhjdLY9XZDzzaiaSudpH/6gisJYE6rcowxkoymB
yYkOPScAT91A2lceC+epcZwzHbF7bn6/6PXEsAIjdH0BmVjWQ3wxTydv6OUaWZo6jEYqlFQV6oE6
zF+22tAISEZd/13NW9p95/2E/is/AEqhY9S586iNet3xkOYx/pB/pd1c7goWAf2zUZizQR8nobGQ
bd0vrwi/AmOTF8aW6J03/QYXLl+s9kUxw5lai+Sc/AYAAP//AwDpY7tZygMAAA==
headers:
CF-RAY:
- 971b3f958e746897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:39 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '890'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '906'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999577'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999577'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_b3632e88268747218e4cc4cc08d87bca
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [iterating_tool], just the
name, exactly as it''s written.\nAction Input: the input to the action, just
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}], "model": "gpt-4o",
"stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '3018'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//hFPBbtswDL37Kwidk8JzErfxbdgwtNh1GwrMhaPItK3MFgWJLloE+fdB
dhK7W4ddBIGP74l8pI4RgNClyECoRrLqbLv8lH5Nb8k9Hu7T5PP37f361R1Wj1us6IeVYhEYtD+g
4gvrRlFnW2RNZoSVQ8kYVD/cbjZpsko22wHoqMQ20GrLyzUtkzhZL+O7ZZyeiQ1phV5k8DMCADgO
ZyjRlPgiMogXl0iH3ssaRXZNAhCO2hAR0nvtWRoWiwlUZBjNUPVut8vNt4b6uuEMHqCRzwjnLrAE
bhC40a4EzehkaAykKcFgAAmsI3W+htSKescNkMGb3HxUIT27ME1dMFF7icODsT1ncMyFDreC8YVz
kUEuvowq1xdzcZpX77DqvQzmmb5tZ4A0hnhgDL49nZHT1amWauto7/+gikob7ZvCofRkgiueyYoB
PUUAT8NE+jcmC+uos1ww/cLhuTRJRz0x7cCEru7OIBPLdsZK14t39IoSWerWz2YqlFQNlhN1WgDZ
l5pmQDTr+u9q3tMeOx9H9F/5CVAKLWNZWIelVm87ntIchi/yr7Sry0PBwqN71goL1ujCJEqsZN+O
2yv8q2fsikqbGp11elzhyhbrdF9VMcbqTkSn6DcAAAD//wMACjMtRssDAAA=
headers:
CF-RAY:
- 971b3f9bbef86897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:40 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1182'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1208'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999320'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999320'
x-ratelimit-reset-project-tokens:
- 1ms
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_7fc641fabc634f29ae085ef176071402
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [iterating_tool], just the
name, exactly as it''s written.\nAction Input: the input to the action, just
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "assistant",
"content": "```\nThought: I have completed the third iteration and need to proceed
to the fourth one.\nAction: iterating_tool\nAction Input: {\"input_text\": \"Fourth
iteration\"}\nObservation: Iteration 0: Fourth iteration"}], "model": "gpt-4o",
"stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '3267'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4RTTW/bMAy9+1cQOieFmyZO5ts2bECxDwztgB3mwlFk2tZmi4JEFx2C/PdBchKn
W4ddBIGP74l8pPYJgNCVyEGoVrLqbTd/m33INvJ+0NnnT/ff3tTZR72483eSv7xLScwCg3Y/UPGJ
daWotx2yJjPCyqFkDKrX69UqW9wssjQCPVXYBVpjeb6k+SJdLOfpZp5mR2JLWqEXOXxPAAD28Qwl
mgqfRA5RJkZ69F42KPJzEoBw1IWIkN5rz9KwmE2gIsNoYtXb7bYwX1sampZzuIVWPiIcu8AKuEWo
aXDcgmZ0MnQG0lRgMKAE1pE6XmOurrkFMnhVmNcqZOcnomlKJupOcbg1duAc9oXQ4VYyPnEhcijE
+yhyfq8Qh8viHdaDl8E7M3TdBSCNIY6MaNvDETmcjeqosY52/g+qqLXRvi0dSk8mmOKZrIjoIQF4
iAMZnnksrKPecsn0E+Nz2Xo16olpBSb05tURZGLZTfH19XL2gl5ZIUvd+YuRCiVVi9VEneYvh0rT
BZBcdP13NS9pj52PE/qv/AQohZaxKq3DSqvnHU9pDsMP+Vfa2eVYsPDoHrXCkjW6MIkKazl04/IK
/8sz9mWtTYPOOj1ucG3LZbar6xRTtRHJIfkNAAD//wMANy12ZMoDAAA=
headers:
CF-RAY:
- 971b3fa3ea2f6897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:41 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '780'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '878'
x-ratelimit-limit-project-tokens:
- '30000000'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-project-tokens:
- '29999268'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999268'
x-ratelimit-reset-project-tokens:
- 1ms
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_603a6c645bac468888838d21c64db11f
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Iterating Agent. You
are an agent that iterates a given number of times\nYour personal goal is: Call
the iterating tool 5 times\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool
Arguments: {''input_text'': {''description'': None, ''type'': ''str''}}\nTool
Description: A tool that iterates a given number of times\n\nIMPORTANT: Use
the following format in your response:\n\n```\nThought: you should always think
about what to do\nAction: the action to take, only one name of [iterating_tool],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple JSON object, enclosed in curly braces, using \" to wrap keys and
values.\nObservation: the result of the action\n```\n\nOnce all necessary information
is gathered, return the following format:\n\n```\nThought: I now know the final
answer\nFinal Answer: the final answer to the original input question\n```"},
{"role": "user", "content": "\nCurrent Task: Call the iterating tool 5 times\n\nThis
is the expected criteria for your final answer: A list of the iterations\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}, {"role": "assistant", "content":
"```\nThought: I need to call the iterating tool the first time.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"First iteration\"}\nObservation: Iteration 0: First
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the first iteration and need to proceed to the second one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Second iteration\"}\nObservation: Iteration 0: Second
iteration"}, {"role": "assistant", "content": "```\nThought: I have completed
the second iteration and need to proceed to the third one.\nAction: iterating_tool\nAction
Input: {\"input_text\": \"Third iteration\"}\nObservation: Iteration 0: Third
iteration\n\n\nYou ONLY have access to the following tools, and should NEVER
make up tools that are not listed here:\n\nTool Name: iterating_tool\nTool Arguments:
{''input_text'': {''description'': None, ''type'': ''str''}}\nTool Description:
A tool that iterates a given number of times\n\nIMPORTANT: Use the following
format in your response:\n\n```\nThought: you should always think about what
to do\nAction: the action to take, only one name of [iterating_tool], just the
name, exactly as it''s written.\nAction Input: the input to the action, just
a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n```\n\nOnce all necessary information is gathered,
return the following format:\n\n```\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question\n```"}, {"role": "assistant",
"content": "```\nThought: I have completed the third iteration and need to proceed
to the fourth one.\nAction: iterating_tool\nAction Input: {\"input_text\": \"Fourth
iteration\"}\nObservation: Iteration 0: Fourth iteration"}, {"role": "assistant",
"content": "```\nThought: I have completed the fourth iteration and need to
proceed to the fifth one.\nAction: iterating_tool\nAction Input: {\"input_text\":
\"Fifth iteration\"}\nObservation: Iteration 0: Fifth iteration"}], "model":
"gpt-4o", "stop": ["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '3514'
content-type:
- application/json
cookie:
- __cf_bm=6OZC5kPO9TEmVSACP2sSOZK9ZEZ5I4T_VUlfmzsoY9Y-1755623255-1.0.1.1-ek3SaNBOhXmCg7K3J7LIsE0aCrnK5YfSumHDT6nc8Df1Zh3bzMLHLDqTUwtqiG8SwxiIFXeGP4.Vt2sx9b3FCkxoyrqNpgrBL5DAffAGHm8;
_cfuvid=R_H7SrOF3QFEWePZfvxzuyKWZAt5ulsNbP28.6DC9wM-1755623255760-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xTy26jMBTd8xVXXpOK0IRE7KqRInVazaZZTROBY1/AU2NT26QzqvLvI0MSSNuJ
ZsPC58E99/EeABDBSQqEVdSxupGTb8lDcne7pt8Xb/r14XX5tJiufiT1z8f944smoVfo3S9k7qS6
YbpuJDqhVQ8zg9Shd50u5vMkvo2TuANqzVF6Wdm4yUxP4iieTaLlJEqOwkoLhpak8BwAALx3X1+i
4vibpBCFp5caraUlkvRMAiBGS/9CqLXCOqocCQeQaeVQdVXneb5R60q3ZeVSuIeK7hGOKZADlRLm
IBwa6kPZm41aCUUl3Cn7hiaF5w25P6EQpbASxrpBsCEhfGA8IdOKX6WsK2GuM1a6Na66ThHFJWO7
UXmej/tgsGgt9WNQrZQjgCqlXZ/YT2B7RA7nnktdNkbv7AcpKYQStsoMUquV7691uiEdeggAtt1s
24txkcbounGZ0y/Y/W4Rz3s/MmzTgM6TI+i0o3KkWk7DL/wyjo4KaUfbQRhlFfJBOqwSbbnQIyAY
pf5czVfefXKhyv+xHwDGsHHIs8YgF+wy8UAz6I/tX7Rzl7uCiUWzFwwzJ9D4SXAsaCv7OyD2j3VY
Z4VQJZrGiP4YiiabJbuiiDBiSxIcgr8AAAD//wMAnPTcwxUEAAA=
headers:
CF-RAY:
- 971b3faa6ba46897-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 19 Aug 2025 17:07:43 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1392'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1841'
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999217'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_19dc255cec9d4763be7d5f597c80e936
status:
code: 200
message: OK
version: 1
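The recorded request/response pairs above form a VCR-style cassette: when the vcr-marked tool test later in this comparison runs, these interactions are replayed instead of calling api.openai.com, so the test is deterministic and needs no API key. A minimal sketch of how such a cassette is typically wired up, assuming the pytest vcr plugin conventions already used in this suite (the test name here is illustrative only):

# Sketch only: shows how a cassette like the one above is replayed in a test.
import pytest

@pytest.mark.vcr(filter_headers=["authorization"])  # same marker as the new test below
def test_replays_recorded_openai_calls():
    # HTTP traffic to api.openai.com is served from the recorded cassette,
    # and the Authorization header is filtered out of the recording.
    ...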


@@ -497,6 +497,131 @@ def test_unstructured_flow_event_emission():
    assert isinstance(received_events[6].timestamp, datetime)


def test_flow_trigger_payload_injection():
    captured_payload = []

    class TriggerFlow(Flow):
        @start()
        def start_method(self, crewai_trigger_payload=None):
            captured_payload.append(crewai_trigger_payload)
            return "started"

        @listen(start_method)
        def second_method(self):
            captured_payload.append("no_parameter")
            return "finished"

    flow = TriggerFlow()
    test_payload = "This is important trigger data"
    flow.kickoff(inputs={"crewai_trigger_payload": test_payload})

    assert captured_payload == [test_payload, "no_parameter"]


def test_flow_trigger_payload_injection_multiple_starts():
    captured_payloads = []

    class MultiStartFlow(Flow):
        @start()
        def start_method_1(self, crewai_trigger_payload=None):
            captured_payloads.append(("start_1", crewai_trigger_payload))
            return "start_1_done"

        @start()
        def start_method_2(self, crewai_trigger_payload=None):
            captured_payloads.append(("start_2", crewai_trigger_payload))
            return "start_2_done"

    flow = MultiStartFlow()
    test_payload = "Multiple start trigger data"
    flow.kickoff(inputs={"crewai_trigger_payload": test_payload})

    assert captured_payloads == [("start_1", test_payload), ("start_2", test_payload)]


def test_flow_without_trigger_payload():
    captured_payload = []

    class NormalFlow(Flow):
        @start()
        def start_method(self, crewai_trigger_payload=None):
            captured_payload.append(crewai_trigger_payload)
            return "no_trigger"

    flow = NormalFlow()
    flow.kickoff(inputs={"other_data": "some value"})

    assert captured_payload[0] is None


def test_flow_trigger_payload_with_structured_state():
    class TriggerState(BaseModel):
        id: str = "test"
        message: str = ""

    class StructuredFlow(Flow[TriggerState]):
        @start()
        def start_method(self, crewai_trigger_payload=None):
            return crewai_trigger_payload

    flow = StructuredFlow()
    test_payload = "Structured state trigger data"
    result = flow.kickoff(inputs={"crewai_trigger_payload": test_payload})

    assert result == test_payload


def test_flow_start_method_without_trigger_parameter():
    execution_order = []

    class FlowWithoutParameter(Flow):
        @start()
        def start_without_param(self):
            execution_order.append("start_executed")
            return "started"

        @listen(start_without_param)
        def second_method(self):
            execution_order.append("second_executed")
            return "finished"

    flow = FlowWithoutParameter()
    result = flow.kickoff(inputs={"crewai_trigger_payload": "some data"})

    assert execution_order == ["start_executed", "second_executed"]
    assert result == "finished"


def test_async_flow_with_trigger_payload():
    captured_payload = []

    class AsyncTriggerFlow(Flow):
        @start()
        async def async_start_method(self, crewai_trigger_payload=None):
            captured_payload.append(crewai_trigger_payload)
            await asyncio.sleep(0.01)
            return "async_started"

        @listen(async_start_method)
        async def async_second_method(self, result):
            captured_payload.append(result)
            await asyncio.sleep(0.01)
            return "async_finished"

    flow = AsyncTriggerFlow()
    test_payload = "Async trigger data"
    result = asyncio.run(flow.kickoff_async(inputs={"crewai_trigger_payload": test_payload}))

    assert captured_payload == [test_payload, "async_started"]
    assert result == "async_finished"


def test_structured_flow_event_emission():
    """Test that the correct events are emitted during structured flow
    execution with all fields validated."""
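Taken together, the tests above pin down the new behaviour: a value supplied under the "crewai_trigger_payload" key in kickoff inputs is injected into any @start method that declares a crewai_trigger_payload parameter (sync or async, with any number of start methods), while start methods that do not declare it run unchanged. A minimal usage sketch follows; the import path is an assumption, since this hunk does not show the test module's imports:

# Sketch only: import location, flow name, and method names are illustrative.
from crewai.flow.flow import Flow, listen, start

class GreetingFlow(Flow):
    @start()
    def begin(self, crewai_trigger_payload=None):
        # Receives the payload passed to kickoff, or None when no payload is supplied.
        return f"payload: {crewai_trigger_payload}"

    @listen(begin)
    def finish(self, result):
        return result

flow = GreetingFlow()
print(flow.kickoff(inputs={"crewai_trigger_payload": "hello"}))  # -> "payload: hello"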


@@ -1,9 +1,12 @@
import asyncio
import inspect
import unittest
from typing import Any, Callable, Dict, List
from typing import Callable
from unittest.mock import patch
import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
from crewai.tools import BaseTool, tool
@@ -130,6 +133,7 @@ def test_result_as_answer_in_tool_decorator():
class SyncTool(BaseTool):
    """Test implementation with a synchronous _run method"""
    name: str = "sync_tool"
    description: str = "A synchronous tool for testing"
@@ -140,6 +144,7 @@ class SyncTool(BaseTool):
class AsyncTool(BaseTool):
    """Test implementation with an asynchronous _run method"""
    name: str = "async_tool"
    description: str = "An asynchronous tool for testing"
@@ -174,7 +179,7 @@ def test_run_calls_asyncio_run_for_async_tools():
    """Test that asyncio.run is called when using async tools."""
    async_tool = AsyncTool()
    with patch('asyncio.run') as mock_run:
    with patch("asyncio.run") as mock_run:
        mock_run.return_value = "Processed test asynchronously"
        async_result = async_tool.run(input_text="test")
@@ -186,9 +191,43 @@ def test_run_does_not_call_asyncio_run_for_sync_tools():
    """Test that asyncio.run is NOT called when using sync tools."""
    sync_tool = SyncTool()
    with patch('asyncio.run') as mock_run:
    with patch("asyncio.run") as mock_run:
        sync_result = sync_tool.run(input_text="test")
    mock_run.assert_not_called()
    assert sync_result == "Processed test synchronously"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_max_usage_count_is_respected():
    class IteratingTool(BaseTool):
        name: str = "iterating_tool"
        description: str = "A tool that iterates a given number of times"

        def _run(self, input_text: str):
            return f"Iteration {input_text}"

    tool = IteratingTool(max_usage_count=5)

    agent = Agent(
        role="Iterating Agent",
        goal="Call the iterating tool 5 times",
        backstory="You are an agent that iterates a given number of times",
        tools=[tool],
    )
    task = Task(
        description="Call the iterating tool 5 times",
        expected_output="A list of the iterations",
        agent=agent,
    )
    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=True,
    )

    crew.kickoff()

    assert tool.max_usage_count == 5
    assert tool.current_usage_count == 5
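This test is the integration check for the new usage-count fields on tools: a BaseTool constructed with max_usage_count is intended to cap how often agents can call it, and current_usage_count records how many calls were actually made during the crew run. A smaller sketch of the same construction, with a hypothetical tool name chosen here purely for illustration:

# Sketch only: EchoTool is a hypothetical example, not part of this changeset.
from crewai.tools import BaseTool

class EchoTool(BaseTool):
    name: str = "echo_tool"
    description: str = "Echoes its input"

    def _run(self, input_text: str) -> str:
        return f"echo: {input_text}"

tool = EchoTool(max_usage_count=3)
# After a crew run that uses this tool, tool.current_usage_count reflects how many
# times agents invoked it, as asserted in test_max_usage_count_is_respected above.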

uv.lock generated

@@ -1,5 +1,5 @@
version = 1
revision = 2
revision = 3
requires-python = ">=3.10, <3.14"
resolution-markers = [
    "python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
@@ -787,7 +787,7 @@ requires-dist = [
    { name = "litellm", specifier = "==1.74.9" },
    { name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=0.1.94" },
    { name = "onnxruntime", specifier = "==1.22.0" },
    { name = "openai", specifier = "<1.100.0" },
    { name = "openai", specifier = ">=1.13.3" },
    { name = "openpyxl", specifier = ">=3.1.5" },
    { name = "openpyxl", marker = "extra == 'openpyxl'", specifier = ">=3.1.5" },
    { name = "opentelemetry-api", specifier = ">=1.30.0" },