Compare commits

..

49 Commits

Author SHA1 Message Date
Lorenze Jay
c0ad4576e2 Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-20 15:36:40 -08:00
Lorenze Jay
6359b64d22 added docstrings and type hints for cli 2024-11-20 15:36:12 -08:00
Lorenze Jay
9329119f76 clearer docs 2024-11-20 14:05:15 -08:00
Lorenze Jay
38c0d61b11 more fixes 2024-11-20 14:02:12 -08:00
Lorenze Jay
8564f5551f rm print 2024-11-20 13:49:58 -08:00
Lorenze Jay
8a5404275f linted 2024-11-20 13:48:11 -08:00
Lorenze Jay
52189a46bc more docs 2024-11-20 13:43:08 -08:00
Lorenze Jay
44ab749fda improvements from review 2024-11-20 13:32:00 -08:00
Lorenze Jay
3c4504bd4f better docs 2024-11-20 13:31:13 -08:00
Lorenze Jay
23276cbd76 adding docs 2024-11-19 18:31:09 -08:00
Lorenze Jay
fe18da5e11 fix 2024-11-19 18:22:05 -08:00
Lorenze Jay
76da972ce9 put a flag 2024-11-19 17:42:44 -08:00
Lorenze Jay
4663997b4c verbose run 2024-11-19 17:31:53 -08:00
Lorenze Jay
b185b9e289 linted 2024-11-19 17:29:06 -08:00
Lorenze Jay
787f2eaa7c mock knowledge query to not spin up db 2024-11-19 17:27:17 -08:00
Lorenze Jay
e7d816fb2a Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-19 15:09:33 -08:00
Lorenze Jay
8373c9b521 linted 2024-11-19 14:50:26 -08:00
Lorenze Jay
ec2fe6ff91 just mocks 2024-11-19 14:48:00 -08:00
Lorenze Jay
58bf2d57f7 added extra cassette 2024-11-19 14:16:22 -08:00
Lorenze Jay
705ee16c1c type check fixes 2024-11-19 12:06:29 -08:00
Lorenze Jay
0c5b6f2a93 mypysrc fixes 2024-11-19 12:02:06 -08:00
Lorenze Jay
914067df37 fixed text_file_knowledge 2024-11-19 11:39:18 -08:00
Lorenze Jay
de742c827d improvements 2024-11-19 11:27:01 -08:00
Lorenze Jay
efa8a378a1 None embedder to use default on pipeline cloning 2024-11-19 10:53:09 -08:00
Lorenze Jay
e882725b8a updated default embedder 2024-11-19 10:43:06 -08:00
Lorenze Jay
cbfdbe3b68 generating cassettes for knowledge test 2024-11-19 10:10:14 -08:00
Lorenze Jay
c8bf242633 fix duplicate 2024-11-19 09:59:23 -08:00
Lorenze Jay
70910dd7b4 fix test 2024-11-19 09:41:33 -08:00
Lorenze Jay
b104404418 cleanup rm unused embedder 2024-11-18 16:03:48 -08:00
Lorenze Jay
d579c5ae12 linted 2024-11-18 13:58:23 -08:00
Lorenze Jay
4831dcb85b Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-18 13:55:32 -08:00
Lorenze Jay
cbfcde73ec consolodation and improvements 2024-11-18 13:52:33 -08:00
Lorenze Jay
b2c06d5b7a properly reset memory+knowledge 2024-11-18 13:45:43 -08:00
Lorenze Jay
352d05370e properly reset memory 2024-11-18 13:37:16 -08:00
Lorenze Jay
b90793874c return this 2024-11-15 15:51:07 -08:00
Lorenze Jay
cdf5233523 Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-15 15:42:32 -08:00
Lorenze Jay
cb03ee60b8 improvements all around Knowledge class 2024-11-15 15:28:07 -08:00
Lorenze Jay
10f445e18a ensure embeddings are persisted 2024-11-14 18:31:07 -08:00
Lorenze Jay
98a708ca15 Merge branch 'main' of github.com:crewAIInc/crewAI into knowledge 2024-11-14 12:22:07 -08:00
Brandon Hancock
7b59c5b049 adding in lorenze feedback 2024-11-07 12:10:09 -05:00
Brandon Hancock
86ede8344c update yaml to include optional deps 2024-11-07 11:41:49 -05:00
Brandon Hancock
59165cbad8 fix linting 2024-11-07 11:37:06 -05:00
Brandon Hancock
4af263ca1e Merge branch 'main' into knowledge 2024-11-07 11:33:08 -05:00
Brandon Hancock
617ee989cd added additional sources 2024-11-06 16:41:17 -05:00
Brandon Hancock
6131dbac4f Improve types and better support for file paths 2024-11-06 15:57:03 -05:00
Brandon Hancock
1a35114c08 Adding core knowledge sources 2024-11-06 12:33:55 -05:00
Brandon Hancock
a8a2f80616 WIP 2024-11-05 12:04:58 -05:00
Brandon Hancock
dc314c1151 Merge branch 'main' into knowledge 2024-11-04 15:02:47 -05:00
João Moura
75322b2de1 initial knowledge 2024-11-04 15:53:19 -03:00
48 changed files with 233 additions and 1522 deletions

View File

@@ -6,7 +6,7 @@ jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
- name: Install Requirements
run: |

View File

@@ -13,10 +13,10 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v2
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: '3.10'
@@ -25,7 +25,7 @@ jobs:
run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"
- name: Setup cache
uses: actions/cache@v4
uses: actions/cache@v3
with:
key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
path: .cache
@@ -42,4 +42,4 @@ jobs:
GH_TOKEN: ${{ secrets.GH_TOKEN }}
- name: Build and deploy MkDocs
run: mkdocs gh-deploy --force
run: mkdocs gh-deploy --force

View File

@@ -11,7 +11,7 @@ jobs:
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: "3.11.9"

View File

@@ -14,7 +14,7 @@ jobs:
uses: actions/checkout@v4
- name: Setup Python
uses: actions/setup-python@v5
uses: actions/setup-python@v4
with:
python-version: "3.11.9"

View File

@@ -100,7 +100,7 @@ You can now start developing your crew by editing the files in the `src/my_proje
#### Example of a simple crew with a sequential process:
Instantiate your crew:
Instatiate your crew:
```shell
crewai create crew latest-ai-development
@@ -121,7 +121,7 @@ researcher:
You're a seasoned researcher with a knack for uncovering the latest
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
reporting_analyst:
role: >
{topic} Reporting Analyst
@@ -205,7 +205,7 @@ class LatestAiDevelopmentCrew():
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)
)
```
**main.py**
@@ -357,7 +357,7 @@ uv run pytest .
### Running static type checks
```bash
uvx mypy src
uvx mypy
```
### Packaging
@@ -399,7 +399,7 @@ Data collected includes:
- Roles of agents in a crew
- Understand high level use cases so we can build better tools, integrations and examples about it
- Tools names available
- Understand out of the publicly available tools, which ones are being used the most so we can improve them
- Understand out of the publically available tools, which ones are being used the most so we can improve them
Users can opt-in to Further Telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.

View File

@@ -1,6 +1,6 @@
---
title: Knowledge
description: Understand what knowledge is in CrewAI and how to effectively use it.
description: What is knowledge in CrewAI and how to use it.
icon: book
---
@@ -8,14 +8,7 @@ icon: book
## Introduction
Knowledge in CrewAI serves as a foundational component for enriching AI agents with contextual and relevant information. It enables agents to access and utilize structured data sources during their execution processes, making them more intelligent and responsive.
The Knowledge class in CrewAI provides a powerful way to manage and query knowledge sources for your AI agents. This guide will show you how to implement knowledge management in your CrewAI projects.
## What is Knowledge?
The `Knowledge` class in CrewAI manages various sources that store information, which can be queried and retrieved by AI agents. This modular approach allows you to integrate diverse data formats such as text, PDFs, spreadsheets, and more into your AI workflows.
Additionally, we have specific tools for generate knowledge sources for strings, text files, PDF's, and Spreadsheets. You can expand on any source type by extending the `KnowledgeSource` class.
## Basic Implementation
@@ -32,14 +25,17 @@ string_source = StringKnowledgeSource(
content=content, metadata={"preference": "personal"}
)
# Create an agent with the knowledge store
llm = LLM(model="gpt-4o-mini", temperature=0)
# Create an agent with the knowledge store
agent = Agent(
role="About User",
goal="You know everything about the user.",
backstory="""You are a master at understanding people and their preferences.""",
verbose=True
verbose=True,
allow_delegation=False,
llm=llm,
)
task = Task(
description="Answer the following questions about the user: {question}",
expected_output="An answer to the question.",
@@ -51,41 +47,12 @@ crew = Crew(
tasks=[task],
verbose=True,
process=Process.sequential,
knowledge_sources=[string_source], # Enable knowledge by adding the sources here.
knowledge={"sources": [string_source], "metadata": {"preference": "personal"}}, # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
)
result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```
## Appending Knowledge Sources To Individual Agents
Sometimes you may want to append knowledge sources to an individual agent. You can do this by setting the `knowledge` parameter in the `Agent` class.
```python
agent = Agent(
...
knowledge_sources=[
StringKnowledgeSource(
content="Users name is John. He is 30 years old and lives in San Francisco.",
metadata={"preference": "personal"},
)
],
)
```
## Agent Level Knowledge Sources
You can also append knowledge sources to an individual agent by setting the `knowledge_sources` parameter in the `Agent` class.
```python
string_source = StringKnowledgeSource(
content="Users name is John. He is 30 years old and lives in San Francisco.",
metadata={"preference": "personal"},
)
agent = Agent(
...
knowledge_sources=[string_source],
)
```
## Embedder Configuration
@@ -99,7 +66,10 @@ string_source = StringKnowledgeSource(
)
crew = Crew(
...
knowledge_sources=[string_source],
embedder_config={"provider": "ollama", "config": {"model": "nomic-embed-text:latest"}},
knowledge={
"sources": [string_source],
"metadata": {"preference": "personal"},
"embedder_config": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
},
)
```

View File

@@ -310,8 +310,8 @@ These are examples of how to configure LLMs for your agent.
from crewai import LLM
llm = LLM(
model="llama-3.1-sonar-large-128k-online",
base_url="https://api.perplexity.ai/",
model="perplexity/mistral-7b-instruct",
base_url="https://api.perplexity.ai/v1",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
@@ -400,4 +400,4 @@ This is particularly useful when working with OpenAI-compatible APIs or when you
- **API Errors**: Check your API key, network connection, and rate limits.
- **Unexpected Outputs**: Refine your prompts and adjust temperature or top_p.
- **Performance Issues**: Consider using a more powerful model or optimizing your queries.
- **Timeout Errors**: Increase the `timeout` parameter or optimize your input.
- **Timeout Errors**: Increase the `timeout` parameter or optimize your input.

View File

@@ -1,59 +0,0 @@
---
title: Before and After Kickoff Hooks
description: Learn how to use before and after kickoff hooks in CrewAI
---
CrewAI provides hooks that allow you to execute code before and after a crew's kickoff. These hooks are useful for preprocessing inputs or post-processing results.
## Before Kickoff Hook
The before kickoff hook is executed before the crew starts its tasks. It receives the input dictionary and can modify it before passing it to the crew. You can use this hook to set up your environment, load necessary data, or preprocess your inputs. This is useful in scenarios where the input data might need enrichment or validation before being processed by the crew.
Here's an example of defining a before kickoff function in your `crew.py`:
```python
from crewai import CrewBase, before_kickoff
@CrewBase
class MyCrew:
@before_kickoff
def prepare_data(self, inputs):
# Preprocess or modify inputs
inputs['processed'] = True
return inputs
#...
```
In this example, the prepare_data function modifies the inputs by adding a new key-value pair indicating that the inputs have been processed.
## After Kickoff Hook
The after kickoff hook is executed after the crew has completed its tasks. It receives the result object, which contains the outputs of the crew's execution. This hook is ideal for post-processing results, such as logging, data transformation, or further analysis.
Here's how you can define an after kickoff function in your `crew.py`:
```python
from crewai import CrewBase, after_kickoff
@CrewBase
class MyCrew:
@after_kickoff
def log_results(self, result):
# Log or modify the results
print("Crew execution completed with result:", result)
return result
# ...
```
In the `log_results` function, the results of the crew execution are simply printed out. You can extend this to perform more complex operations such as sending notifications or integrating with other services.
## Utilizing Both Hooks
Both hooks can be used together to provide a comprehensive setup and teardown process for your crew's execution. They are particularly useful in maintaining clean code architecture by separating concerns and enhancing the modularity of your CrewAI implementations.
## Conclusion
Before and after kickoff hooks in CrewAI offer powerful ways to interact with the lifecycle of a crew's execution. By understanding and utilizing these hooks, you can greatly enhance the robustness and flexibility of your AI agents.

View File

@@ -68,7 +68,6 @@
"concepts/tasks",
"concepts/crews",
"concepts/flows",
"concepts/knowledge",
"concepts/llms",
"concepts/processes",
"concepts/collaboration",

View File

@@ -8,7 +8,7 @@ icon: rocket
Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.
Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
If you haven't installed them yet, you can do so by following the [installation guide](/installation).
Follow the steps below to get crewing! 🚣‍♂️
@@ -23,7 +23,7 @@ Follow the steps below to get crewing! 🚣‍♂️
```
</CodeGroup>
</Step>
<Step title="Modify your `agents.yaml` file">
<Step title="Modify your `agents.yaml` file">
<Tip>
You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file.
@@ -39,7 +39,7 @@ Follow the steps below to get crewing! 🚣‍♂️
You're a seasoned researcher with a knack for uncovering the latest
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
reporting_analyst:
role: >
{topic} Reporting Analyst
@@ -51,7 +51,7 @@ Follow the steps below to get crewing! 🚣‍♂️
it easy for others to understand and act on the information you provide.
```
</Step>
<Step title="Modify your `tasks.yaml` file">
<Step title="Modify your `tasks.yaml` file">
```yaml tasks.yaml
# src/latest_ai_development/config/tasks.yaml
research_task:
@@ -73,8 +73,8 @@ Follow the steps below to get crewing! 🚣‍♂️
agent: reporting_analyst
output_file: report.md
```
</Step>
<Step title="Modify your `crew.py` file">
</Step>
<Step title="Modify your `crew.py` file">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
@@ -121,34 +121,10 @@ Follow the steps below to get crewing! 🚣‍♂️
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)
)
```
</Step>
<Step title="[Optional] Add before and after crew functions">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
from crewai_tools import SerperDevTool
@CrewBase
class LatestAiDevelopmentCrew():
"""LatestAiDevelopment crew"""
@before_kickoff
def before_kickoff_function(self, inputs):
print(f"Before kickoff function with inputs: {inputs}")
return inputs # You can return the inputs or modify them as needed
@after_kickoff
def after_kickoff_function(self, result):
print(f"After kickoff function with result: {result}")
return result # You can return the result or modify it as needed
# ... remaining code
```
</Step>
<Step title="Feel free to pass custom inputs to your crew">
<Step title="Feel free to pass custom inputs to your crew">
For example, you can pass the `topic` input to your crew to customize the research and reporting.
```python main.py
#!/usr/bin/env python
@@ -261,14 +237,14 @@ Follow the steps below to get crewing! 🚣‍♂️
### Note on Consistency in Naming
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly.
#### Example References
<Tip>
Note how we use the same name for the agent in the `agents.yaml` (`email_summarizer`) file as the method name in the `crew.py` (`email_summarizer`) file.
</Tip>
</Tip>
```yaml agents.yaml
email_summarizer:
@@ -305,8 +281,6 @@ Use the annotations to properly reference the agent and task in the `crew.py` fi
* `@task`
* `@crew`
* `@tool`
* `@before_kickoff`
* `@after_kickoff`
* `@callback`
* `@output_json`
* `@output_pydantic`
@@ -330,7 +304,7 @@ def email_summarizer_task(self) -> Task:
<Tip>
In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
You can learn more about the core concepts [here](/concepts).
</Tip>

View File

@@ -1,6 +1,6 @@
[project]
name = "crewai"
version = "0.83.0"
version = "0.80.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<=3.13"
@@ -9,6 +9,7 @@ authors = [
]
dependencies = [
"pydantic>=2.4.2",
"langchain>=0.2.16",
"openai>=1.13.3",
"opentelemetry-api>=1.22.0",
"opentelemetry-sdk>=1.22.0",
@@ -28,8 +29,6 @@ dependencies = [
"tomli-w>=1.1.0",
"tomli>=2.0.2",
"chromadb>=0.5.18",
"pdfplumber>=0.11.4",
"openpyxl>=3.1.5",
]
[project.urls]

View File

@@ -16,7 +16,7 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.83.0"
__version__ = "0.80.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -1,7 +1,7 @@
import os
import shutil
import subprocess
from typing import Any, List, Literal, Optional, Union, Dict
from typing import Any, List, Literal, Optional, Union
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -10,18 +10,13 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS
from crewai.llm import LLM
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
def mock_agent_ops_provider():
@@ -68,7 +63,6 @@ class Agent(BaseAgent):
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
tools: Tools at agents disposal
step_callback: Callback to be executed after each step of the agent execution.
knowledge_sources: Knowledge sources for the agent.
"""
_times_executed: int = PrivateAttr(default=0)
@@ -126,23 +120,11 @@ class Agent(BaseAgent):
default="safe",
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
)
embedder_config: Optional[Dict[str, Any]] = Field(
default=None,
description="Embedder configuration for the agent.",
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the agent.",
)
_knowledge: Optional[Knowledge] = PrivateAttr(
default=None,
)
@model_validator(mode="after")
def post_init_setup(self):
self._set_knowledge()
self.agent_ops_agent_name = self.role
unaccepted_attributes = [
unnacepted_attributes = [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION_NAME",
@@ -176,23 +158,28 @@ class Agent(BaseAgent):
for provider, env_vars in ENV_VARS.items():
if provider == set_provider:
for env_var in env_vars:
if env_var["key_name"] in unnacepted_attributes:
continue
# Check if the environment variable is set
key_name = env_var.get("key_name")
if key_name and key_name not in unaccepted_attributes:
env_value = os.environ.get(key_name)
if "key_name" in env_var:
env_value = os.environ.get(env_var["key_name"])
if env_value:
# Map key names containing "API_KEY" to "api_key"
key_name = (
"api_key" if "API_KEY" in key_name else key_name
"api_key"
if "API_KEY" in env_var["key_name"]
else env_var["key_name"]
)
# Map key names containing "API_BASE" to "api_base"
key_name = (
"api_base" if "API_BASE" in key_name else key_name
"api_base"
if "API_BASE" in env_var["key_name"]
else key_name
)
# Map key names containing "API_VERSION" to "api_version"
key_name = (
"api_version"
if "API_VERSION" in key_name
if "API_VERSION" in env_var["key_name"]
else key_name
)
llm_params[key_name] = env_value
@@ -248,24 +235,9 @@ class Agent(BaseAgent):
self.cache_handler = CacheHandler()
self.set_cache_handler(self.cache_handler)
def _set_knowledge(self):
try:
if self.knowledge_sources:
knowledge_agent_name = f"{self.role.replace(' ', '_')}"
if isinstance(self.knowledge_sources, list) and all(
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
):
self._knowledge = Knowledge(
sources=self.knowledge_sources,
embedder_config=self.embedder_config,
collection_name=knowledge_agent_name,
)
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
def execute_task(
self,
task: Task,
task: Any,
context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
) -> str:
@@ -284,22 +256,6 @@ class Agent(BaseAgent):
task_prompt = task.prompt()
# If the task requires output in JSON or Pydantic format,
# append specific instructions to the task prompt to ensure
# that the final answer does not include any code block markers
if task.output_json or task.output_pydantic:
# Generate the schema based on the output format
if task.output_json:
# schema = json.dumps(task.output_json, indent=2)
schema = generate_model_description(task.output_json)
elif task.output_pydantic:
schema = generate_model_description(task.output_pydantic)
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
output_format=schema
)
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
@@ -317,21 +273,17 @@ class Agent(BaseAgent):
if memory.strip() != "":
task_prompt += self.i18n.slice("memory").format(memory=memory)
if self._knowledge:
agent_knowledge_snippets = self._knowledge.query([task.prompt()])
if agent_knowledge_snippets:
agent_knowledge_context = extract_knowledge_context(
agent_knowledge_snippets
)
if agent_knowledge_context:
task_prompt += agent_knowledge_context
if self.crew:
knowledge_snippets = self.crew.query_knowledge([task.prompt()])
if knowledge_snippets:
crew_knowledge_context = extract_knowledge_context(knowledge_snippets)
if crew_knowledge_context:
task_prompt += crew_knowledge_context
# Integrate the knowledge base
if self.crew and self.crew.knowledge:
knowledge_snippets = self.crew.knowledge.query([task.prompt()])
valid_snippets = [
result["context"]
for result in knowledge_snippets
if result and result.get("context")
]
if valid_snippets:
formatted_knowledge = "\n".join(valid_snippets)
task_prompt += f"\n\nAdditional Information:\n{formatted_knowledge}"
tools = tools or self.tools or []
self.create_agent_executor(tools=tools, task=task)
@@ -447,7 +399,7 @@ class Agent(BaseAgent):
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_structured_tool())
tools_list.append(tool.to_langchain())
else:
tools_list.append(tool)
except ModuleNotFoundError:

View File

@@ -19,7 +19,6 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProces
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config
@@ -107,7 +106,7 @@ class BaseAgent(ABC, BaseModel):
default=False,
description="Enable agent to delegate and ask questions among each other.",
)
tools: Optional[List[Any]] = Field(
tools: Optional[List[BaseTool]] = Field(
default_factory=list, description="Tools at agents' disposal"
)
max_iter: Optional[int] = Field(
@@ -136,35 +135,6 @@ class BaseAgent(ABC, BaseModel):
def process_model_config(cls, values):
return process_config(values, cls)
@field_validator("tools")
@classmethod
def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
"""Validate and process the tools provided to the agent.
This method ensures that each tool is either an instance of BaseTool
or an object with 'name', 'func', and 'description' attributes. If the
tool meets these criteria, it is processed and added to the list of
tools. Otherwise, a ValueError is raised.
"""
processed_tools = []
for tool in tools:
if isinstance(tool, BaseTool):
processed_tools.append(tool)
elif (
hasattr(tool, "name")
and hasattr(tool, "func")
and hasattr(tool, "description")
):
# Tool has the required attributes, create a Tool instance
processed_tools.append(Tool.from_langchain(tool))
else:
raise ValueError(
f"Invalid tool type: {type(tool)}. "
"Tool must be an instance of BaseTool or "
"an object with 'name', 'func', and 'description' attributes."
)
return processed_tools
@model_validator(mode="after")
def validate_and_set_attributes(self):
# Validate required fields

View File

@@ -7,7 +7,6 @@ from rich.console import Console
from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN
from .utils import TokenManager, validate_token
from crewai.cli.tools.main import ToolCommand
console = Console()
@@ -64,22 +63,7 @@ class AuthenticationCommand:
validate_token(token_data["id_token"])
expires_in = 360000 # Token expiration time in seconds
self.token_manager.save_tokens(token_data["access_token"], expires_in)
try:
ToolCommand().login()
except Exception:
console.print(
"\n[bold yellow]Warning:[/bold yellow] Authentication with the Tool Repository failed.",
style="yellow",
)
console.print(
"Other features will work normally, but you may experience limitations "
"with downloading and publishing tools."
"\nRun [bold]crewai login[/bold] to try logging in again.\n",
style="yellow",
)
console.print("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")
console.print("\nWelcome to CrewAI+ !!", style="green")
return
if token_data["error"] not in ("authorization_pending", "slow_down"):

View File

@@ -1,10 +0,0 @@
from .utils import TokenManager
def get_auth_token() -> str:
"""Get the authentication token."""
access_token = TokenManager().get_token()
if not access_token:
raise Exception()
return access_token

View File

@@ -2,7 +2,7 @@ import requests
from requests.exceptions import JSONDecodeError
from rich.console import Console
from crewai.cli.plus_api import PlusAPI
from crewai.cli.authentication.token import get_auth_token
from crewai.cli.utils import get_auth_token
from crewai.telemetry.telemetry import Telemetry
console = Console()

View File

@@ -1,7 +1,7 @@
from typing import Optional
import requests
from os import getenv
from crewai.cli.version import get_crewai_version
from crewai.cli.utils import get_crewai_version
from urllib.parse import urljoin

View File

@@ -3,8 +3,7 @@ import subprocess
import click
from packaging import version
from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version
from crewai.cli.utils import get_crewai_version, read_toml
def run_crew() -> None:

View File

@@ -1,5 +1,5 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from {{folder_name}}.tools.custom_tool import MyCustomTool
@@ -14,18 +14,6 @@ class {{crew_name}}():
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
@before_kickoff # Optional hook to be executed before the crew starts
def pull_data_example(self, inputs):
# Example of pulling data from an external API, dynamically changing the inputs
inputs['extra_data'] = "This is extra data"
return inputs
@after_kickoff # Optional hook to be executed after the crew has finished
def log_results(self, output):
# Example of logging results, dynamically changing the output
print(f"Results: {output}")
return output
@agent
def researcher(self) -> Agent:
return Agent(

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0,<1.0.0"
"crewai[tools]>=0.80.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0,<1.0.0",
"crewai[tools]>=0.80.0,<1.0.0",
]
[project.scripts]

View File

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.83.0,<1.0.0" }
crewai = { extras = ["tools"], version = ">=0.80.0,<1.0.0" }
asyncio = "*"
[tool.poetry.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0,<1.0.0"
"crewai[tools]>=0.80.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.83.0"
"crewai[tools]>=0.80.0"
]

View File

@@ -1,3 +1,4 @@
import importlib.metadata
import os
import shutil
import sys
@@ -8,6 +9,7 @@ import click
import tomli
from rich.console import Console
from crewai.cli.authentication.utils import TokenManager
from crewai.cli.constants import ENV_VARS
if sys.version_info >= (3, 11):
@@ -135,6 +137,11 @@ def _get_nested_value(data: Dict[str, Any], keys: List[str]) -> Any:
return reduce(dict.__getitem__, keys, data)
def get_crewai_version() -> str:
"""Get the version number of CrewAI running the CLI"""
return importlib.metadata.version("crewai")
def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
"""Fetch the environment variables from a .env file and return them as a dictionary."""
try:
@@ -159,6 +166,14 @@ def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
return {}
def get_auth_token() -> str:
"""Get the authentication token."""
access_token = TokenManager().get_token()
if not access_token:
raise Exception()
return access_token
def tree_copy(source, destination):
"""Copies the entire directory structure from the source to the destination."""
for item in os.listdir(source):

View File

@@ -1,6 +0,0 @@
import importlib.metadata
def get_crewai_version() -> str:
"""Get the version number of CrewAI running the CLI"""
return importlib.metadata.version("crewai")

View File

@@ -28,7 +28,6 @@ from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.memory.user.user_memory import UserMemory
from crewai.process import Process
from crewai.task import Task
@@ -203,14 +202,11 @@ class Crew(BaseModel):
default=[],
description="List of execution logs for tasks",
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
)
_knowledge: Optional[Knowledge] = PrivateAttr(
default=None,
knowledge: Optional[Dict[str, Any]] = Field(
default=None, description="Knowledge for the crew. Add knowledge sources to the knowledge object."
)
@field_validator("id", mode="before")
@classmethod
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
@@ -286,22 +282,11 @@ class Crew(BaseModel):
@model_validator(mode="after")
def create_crew_knowledge(self) -> "Crew":
"""Create the knowledge for the crew."""
if self.knowledge_sources:
if self.knowledge:
try:
if isinstance(self.knowledge_sources, list) and all(
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
):
self._knowledge = Knowledge(
sources=self.knowledge_sources,
embedder_config=self.embedder,
collection_name="crew",
)
except Exception as e:
self._logger.log(
"warning", f"Failed to init knowledge: {e}", color="yellow"
)
self.knowledge = Knowledge(**self.knowledge) if isinstance(self.knowledge, dict) else self.knowledge
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid knowledge configuration: {str(e)}")
return self
@model_validator(mode="after")
@@ -957,11 +942,6 @@ class Crew(BaseModel):
result = self._execute_tasks(self.tasks, start_index, True)
return result
def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
if self._knowledge:
return self._knowledge.query(query)
return None
def copy(self):
"""Create a deep copy of the Crew."""

View File

@@ -5,8 +5,8 @@ from pydantic import BaseModel, ConfigDict, Field
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
from crewai.utilities.logger import Logger
from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD
os.environ["TOKENIZERS_PARALLELISM"] = "false" # removes logging from fastembed
@@ -18,33 +18,24 @@ class Knowledge(BaseModel):
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
embedder_config: Optional[Dict[str, Any]] = None
"""
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
embedder_config: Optional[Dict[str, Any]] = None
collection_name: Optional[str] = None
def __init__(
self,
collection_name: str,
sources: List[BaseKnowledgeSource],
embedder_config: Optional[Dict[str, Any]] = None,
storage: Optional[KnowledgeStorage] = None,
**data,
):
def __init__(self, embedder_config: Optional[Dict[str, Any]] = None, **data):
super().__init__(**data)
if storage:
self.storage = storage
else:
self.storage = KnowledgeStorage(
embedder_config=embedder_config, collection_name=collection_name
self.storage = KnowledgeStorage(embedder_config=embedder_config or None)
try:
for source in self.sources:
source.add()
except Exception as e:
Logger(verbose=True).log(
"warning",
f"Failed to init knowledge: {e}",
color="yellow",
)
self.sources = sources
self.storage.initialize_knowledge_storage()
for source in sources:
source.storage = self.storage
source.add()
def query(
self, query: List[str], limit: int = 3, preference: Optional[str] = None
@@ -61,8 +52,3 @@ class Knowledge(BaseModel):
score_threshold=DEFAULT_SCORE_THRESHOLD,
)
return results
def _add_sources(self):
for source in self.sources:
source.storage = self.storage
source.add()

View File

@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
from typing import List, Dict, Any
import numpy as np
from pydantic import BaseModel, ConfigDict, Field
@@ -18,7 +18,6 @@ class BaseKnowledgeSource(BaseModel, ABC):
model_config = ConfigDict(arbitrary_types_allowed=True)
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
metadata: Dict[str, Any] = Field(default_factory=dict)
collection_name: Optional[str] = Field(default=None)
@abstractmethod
def load_content(self) -> Dict[Any, str]:

View File

@@ -1,4 +1,4 @@
from typing import List, Optional
from typing import List
from pydantic import Field
@@ -9,7 +9,6 @@ class StringKnowledgeSource(BaseKnowledgeSource):
"""A knowledge source that stores and queries plain text content using embeddings."""
content: str = Field(...)
collection_name: Optional[str] = Field(default=None)
def model_post_init(self, _):
"""Post-initialization method to validate content."""

View File

@@ -3,16 +3,12 @@ import io
import logging
import chromadb
import os
import chromadb.errors
from crewai.utilities.paths import db_storage_path
from typing import Optional, List, Dict, Any, Union
from typing import Optional, List
from typing import Dict, Any
from crewai.utilities import EmbeddingConfigurator
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
import hashlib
from chromadb.config import Settings
from chromadb.api import ClientAPI
from crewai.utilities.logger import Logger
@contextlib.contextmanager
@@ -39,16 +35,9 @@ class KnowledgeStorage(BaseKnowledgeStorage):
"""
collection: Optional[chromadb.Collection] = None
collection_name: Optional[str] = "knowledge"
app: Optional[ClientAPI] = None
def __init__(
self,
embedder_config: Optional[Dict[str, Any]] = None,
collection_name: Optional[str] = None,
):
self.collection_name = collection_name
self._set_embedder_config(embedder_config)
def __init__(self, embedder_config: Optional[Dict[str, Any]] = None):
self._initialize_app(embedder_config or {})
def search(
self,
@@ -78,75 +67,43 @@ class KnowledgeStorage(BaseKnowledgeStorage):
else:
raise Exception("Collection not initialized")
def initialize_knowledge_storage(self):
base_path = os.path.join(db_storage_path(), "knowledge")
def _initialize_app(self, embedder_config: Optional[Dict[str, Any]] = None):
import chromadb
from chromadb.config import Settings
self._set_embedder_config(embedder_config)
chroma_client = chromadb.PersistentClient(
path=base_path,
path=f"{db_storage_path()}/knowledge",
settings=Settings(allow_reset=True),
)
self.app = chroma_client
try:
collection_name = (
f"knowledge_{self.collection_name}"
if self.collection_name
else "knowledge"
)
if self.app:
self.collection = self.app.get_or_create_collection(
name=collection_name, embedding_function=self.embedder_config
)
else:
raise Exception("Vector Database Client not initialized")
self.collection = self.app.get_or_create_collection(name="knowledge")
except Exception:
raise Exception("Failed to create or get collection")
def reset(self):
if self.app:
self.app.reset()
else:
base_path = os.path.join(db_storage_path(), "knowledge")
self.app = chromadb.PersistentClient(
path=base_path,
settings=Settings(allow_reset=True),
)
self.app.reset()
def save(
self,
documents: List[str],
metadata: Union[Dict[str, Any], List[Dict[str, Any]]],
self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
):
if self.collection:
try:
metadatas = [metadata] if isinstance(metadata, dict) else metadata
metadatas = [metadata] if isinstance(metadata, dict) else metadata
ids = [
hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
]
ids = [
hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
]
self.collection.upsert(
documents=documents,
metadatas=metadatas,
ids=ids,
)
except chromadb.errors.InvalidDimensionException as e:
Logger(verbose=True).log(
"error",
"Embedding dimension mismatch. This usually happens when mixing different embedding models. Try resetting the collection using `crewai reset-memories -a`",
"red",
)
raise ValueError(
"Embedding dimension mismatch. Make sure you're using the same embedding model "
"across all operations with this collection."
"Try resetting the collection using `crewai reset-memories -a`"
) from e
except Exception as e:
Logger(verbose=True).log(
"error", f"Failed to upsert documents: {e}", "red"
)
raise
self.collection.upsert(
documents=documents,
metadatas=metadatas,
ids=ids,
)
else:
raise Exception("Collection not initialized")

View File

@@ -1,12 +0,0 @@
from typing import Any, Dict, List
def extract_knowledge_context(knowledge_snippets: List[Dict[str, Any]]) -> str:
"""Extract knowledge from the task prompt."""
valid_snippets = [
result["context"]
for result in knowledge_snippets
if result and result.get("context")
]
snippet = "\n".join(valid_snippets)
return f"Additional Information: {snippet}" if valid_snippets else ""

View File

@@ -1,6 +1,6 @@
import io
import logging
import sys
import threading
import warnings
from contextlib import contextmanager
from typing import Any, Dict, List, Optional, Union
@@ -13,25 +13,16 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
)
class FilteredStream:
def __init__(self, original_stream):
self._original_stream = original_stream
self._lock = threading.Lock()
def write(self, s) -> int:
with self._lock:
if (
"Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
in s
or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
in s
):
return 0
return self._original_stream.write(s)
def flush(self):
with self._lock:
return self._original_stream.flush()
class FilteredStream(io.StringIO):
def write(self, s):
if (
"Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
in s
or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
in s
):
return
super().write(s)
LLM_CONTEXT_WINDOW_SIZES = {
@@ -69,8 +60,8 @@ def suppress_warnings():
# Redirect stdout and stderr
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = FilteredStream(old_stdout)
sys.stderr = FilteredStream(old_stderr)
sys.stdout = FilteredStream()
sys.stderr = FilteredStream()
try:
yield

View File

@@ -20,10 +20,10 @@ from pydantic import (
from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.i18n import I18N
@@ -208,9 +208,7 @@ class Task(BaseModel):
"""Execute the task asynchronously."""
future: Future[TaskOutput] = Future()
threading.Thread(
daemon=True,
target=self._execute_task_async,
args=(agent, context, tools, future),
target=self._execute_task_async, args=(agent, context, tools, future)
).start()
return future

View File

@@ -1,12 +1,10 @@
from abc import ABC, abstractmethod
from inspect import signature
from typing import Any, Callable, Type, get_args, get_origin
from pydantic import BaseModel, ConfigDict, Field, create_model, validator
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field, validator
from pydantic import BaseModel as PydanticBaseModel
from crewai.tools.structured_tool import CrewStructuredTool
class BaseTool(BaseModel, ABC):
class _ArgsSchemaPlaceholder(PydanticBaseModel):
@@ -65,10 +63,9 @@ class BaseTool(BaseModel, ABC):
) -> Any:
"""Here goes the actual implementation of the tool."""
def to_structured_tool(self) -> CrewStructuredTool:
"""Convert this tool to a CrewStructuredTool instance."""
def to_langchain(self) -> StructuredTool:
self._set_args_schema()
return CrewStructuredTool(
return StructuredTool(
name=self.name,
description=self.description,
args_schema=self.args_schema,
@@ -76,47 +73,17 @@ class BaseTool(BaseModel, ABC):
)
@classmethod
def from_langchain(cls, tool: Any) -> "BaseTool":
"""Create a Tool instance from a CrewStructuredTool.
This method takes a CrewStructuredTool object and converts it into a
Tool instance. It ensures that the provided tool has a callable 'func'
attribute and infers the argument schema if not explicitly provided.
"""
if not hasattr(tool, "func") or not callable(tool.func):
raise ValueError("The provided tool must have a callable 'func' attribute.")
args_schema = getattr(tool, "args_schema", None)
if args_schema is None:
# Infer args_schema from the function signature if not provided
func_signature = signature(tool.func)
annotations = func_signature.parameters
args_fields = {}
for name, param in annotations.items():
if name != "self":
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
field_info = Field(
default=...,
description="",
)
args_fields[name] = (param_annotation, field_info)
if args_fields:
args_schema = create_model(f"{tool.name}Input", **args_fields)
else:
# Create a default schema with no fields if no parameters are found
args_schema = create_model(
f"{tool.name}Input", __base__=PydanticBaseModel
)
return cls(
name=getattr(tool, "name", "Unnamed Tool"),
description=getattr(tool, "description", ""),
func=tool.func,
args_schema=args_schema,
)
def from_langchain(cls, tool: StructuredTool) -> "BaseTool":
if cls == Tool:
if tool.func is None:
raise ValueError("StructuredTool must have a callable 'func'")
return Tool(
name=tool.name,
description=tool.description,
args_schema=tool.args_schema,
func=tool.func,
)
raise NotImplementedError(f"from_langchain not implemented for {cls.__name__}")
def _set_args_schema(self):
if self.args_schema is None:
@@ -167,70 +134,17 @@ class BaseTool(BaseModel, ABC):
class Tool(BaseTool):
"""The function that will be executed when the tool is called."""
func: Callable
"""The function that will be executed when the tool is called."""
def _run(self, *args: Any, **kwargs: Any) -> Any:
return self.func(*args, **kwargs)
@classmethod
def from_langchain(cls, tool: Any) -> "Tool":
"""Create a Tool instance from a CrewStructuredTool.
This method takes a CrewStructuredTool object and converts it into a
Tool instance. It ensures that the provided tool has a callable 'func'
attribute and infers the argument schema if not explicitly provided.
Args:
tool (Any): The CrewStructuredTool object to be converted.
Returns:
Tool: A new Tool instance created from the provided CrewStructuredTool.
Raises:
ValueError: If the provided tool does not have a callable 'func' attribute.
"""
if not hasattr(tool, "func") or not callable(tool.func):
raise ValueError("The provided tool must have a callable 'func' attribute.")
args_schema = getattr(tool, "args_schema", None)
if args_schema is None:
# Infer args_schema from the function signature if not provided
func_signature = signature(tool.func)
annotations = func_signature.parameters
args_fields = {}
for name, param in annotations.items():
if name != "self":
param_annotation = (
param.annotation if param.annotation != param.empty else Any
)
field_info = Field(
default=...,
description="",
)
args_fields[name] = (param_annotation, field_info)
if args_fields:
args_schema = create_model(f"{tool.name}Input", **args_fields)
else:
# Create a default schema with no fields if no parameters are found
args_schema = create_model(
f"{tool.name}Input", __base__=PydanticBaseModel
)
return cls(
name=getattr(tool, "name", "Unnamed Tool"),
description=getattr(tool, "description", ""),
func=tool.func,
args_schema=args_schema,
)
def to_langchain(
tools: list[BaseTool | CrewStructuredTool],
) -> list[CrewStructuredTool]:
return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]
tools: list[BaseTool | StructuredTool],
) -> list[StructuredTool]:
return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools]
def tool(*args):

View File

@@ -1,7 +1,6 @@
from pydantic import BaseModel, Field
from crewai.agents.cache import CacheHandler
from crewai.tools.structured_tool import CrewStructuredTool
class CacheTools(BaseModel):
@@ -14,7 +13,9 @@ class CacheTools(BaseModel):
)
def tool(self):
return CrewStructuredTool.from_function(
from langchain.tools import StructuredTool
return StructuredTool.from_function(
func=self.hit_cache,
name=self.name,
description="Reads directly from the cache",

View File

@@ -1,242 +0,0 @@
from __future__ import annotations
import inspect
import textwrap
from typing import Any, Callable, Optional, Union, get_type_hints
from pydantic import BaseModel, Field, create_model
from crewai.utilities.logger import Logger
class CrewStructuredTool:
"""A structured tool that can operate on any number of inputs.
This tool intends to replace StructuredTool with a custom implementation
that integrates better with CrewAI's ecosystem.
"""
def __init__(
self,
name: str,
description: str,
args_schema: type[BaseModel],
func: Callable[..., Any],
) -> None:
"""Initialize the structured tool.
Args:
name: The name of the tool
description: A description of what the tool does
args_schema: The pydantic model for the tool's arguments
func: The function to run when the tool is called
"""
self.name = name
self.description = description
self.args_schema = args_schema
self.func = func
self._logger = Logger()
# Validate the function signature matches the schema
self._validate_function_signature()
@classmethod
def from_function(
cls,
func: Callable,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
args_schema: Optional[type[BaseModel]] = None,
infer_schema: bool = True,
**kwargs: Any,
) -> CrewStructuredTool:
"""Create a tool from a function.
Args:
func: The function to create a tool from
name: The name of the tool. Defaults to the function name
description: The description of the tool. Defaults to the function docstring
return_direct: Whether to return the output directly
args_schema: Optional schema for the function arguments
infer_schema: Whether to infer the schema from the function signature
**kwargs: Additional arguments to pass to the tool
Returns:
A CrewStructuredTool instance
Example:
>>> def add(a: int, b: int) -> int:
... '''Add two numbers'''
... return a + b
>>> tool = CrewStructuredTool.from_function(add)
"""
name = name or func.__name__
description = description or inspect.getdoc(func)
if description is None:
raise ValueError(
f"Function {name} must have a docstring if description not provided."
)
# Clean up the description
description = textwrap.dedent(description).strip()
if args_schema is not None:
# Use provided schema
schema = args_schema
elif infer_schema:
# Infer schema from function signature
schema = cls._create_schema_from_function(name, func)
else:
raise ValueError(
"Either args_schema must be provided or infer_schema must be True."
)
return cls(
name=name,
description=description,
args_schema=schema,
func=func,
)
@staticmethod
def _create_schema_from_function(
name: str,
func: Callable,
) -> type[BaseModel]:
"""Create a Pydantic schema from a function's signature.
Args:
name: The name to use for the schema
func: The function to create a schema from
Returns:
A Pydantic model class
"""
# Get function signature
sig = inspect.signature(func)
# Get type hints
type_hints = get_type_hints(func)
# Create field definitions
fields = {}
for param_name, param in sig.parameters.items():
# Skip self/cls for methods
if param_name in ("self", "cls"):
continue
# Get type annotation
annotation = type_hints.get(param_name, Any)
# Get default value
default = ... if param.default == param.empty else param.default
# Add field
fields[param_name] = (annotation, Field(default=default))
# Create model
schema_name = f"{name.title()}Schema"
return create_model(schema_name, **fields)
def _validate_function_signature(self) -> None:
"""Validate that the function signature matches the args schema."""
sig = inspect.signature(self.func)
schema_fields = self.args_schema.model_fields
# Check required parameters
for param_name, param in sig.parameters.items():
# Skip self/cls for methods
if param_name in ("self", "cls"):
continue
# Skip **kwargs parameters
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# Only validate required parameters without defaults
if param.default == inspect.Parameter.empty:
if param_name not in schema_fields:
raise ValueError(
f"Required function parameter '{param_name}' "
f"not found in args_schema"
)
def _parse_args(self, raw_args: Union[str, dict]) -> dict:
"""Parse and validate the input arguments against the schema.
Args:
raw_args: The raw arguments to parse, either as a string or dict
Returns:
The validated arguments as a dictionary
"""
if isinstance(raw_args, str):
try:
import json
raw_args = json.loads(raw_args)
except json.JSONDecodeError as e:
raise ValueError(f"Failed to parse arguments as JSON: {e}")
try:
validated_args = self.args_schema.model_validate(raw_args)
return validated_args.model_dump()
except Exception as e:
raise ValueError(f"Arguments validation failed: {e}")
async def ainvoke(
self,
input: Union[str, dict],
config: Optional[dict] = None,
**kwargs: Any,
) -> Any:
"""Asynchronously invoke the tool.
Args:
input: The input arguments
config: Optional configuration
**kwargs: Additional keyword arguments
Returns:
The result of the tool execution
"""
parsed_args = self._parse_args(input)
if inspect.iscoroutinefunction(self.func):
return await self.func(**parsed_args, **kwargs)
else:
# Run sync functions in a thread pool
import asyncio
return await asyncio.get_event_loop().run_in_executor(
None, lambda: self.func(**parsed_args, **kwargs)
)
def _run(self, *args, **kwargs) -> Any:
"""Legacy method for compatibility."""
# Convert args/kwargs to our expected format
input_dict = dict(zip(self.args_schema.model_fields.keys(), args))
input_dict.update(kwargs)
return self.invoke(input_dict)
def invoke(
self, input: Union[str, dict], config: Optional[dict] = None, **kwargs: Any
) -> Any:
"""Main method for tool execution."""
parsed_args = self._parse_args(input)
return self.func(**parsed_args, **kwargs)
@property
def args(self) -> dict:
"""Get the tool's input arguments schema."""
return self.args_schema.model_json_schema()["properties"]
def __repr__(self) -> str:
return (
f"CrewStructuredTool(name='{self.name}', description='{self.description}')"
)

View File

@@ -11,7 +11,7 @@
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
"no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
"final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
"format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
"task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
@@ -21,8 +21,7 @@
"summarizer_system_message": "You are a helpful assistant that summarizes text.",
"sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared."
},
"errors": {
"force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",

View File

@@ -1,6 +1,6 @@
import json
import re
from typing import Any, Optional, Type, Union, get_args, get_origin
from typing import Any, Optional, Type, Union
from pydantic import BaseModel, ValidationError
@@ -214,38 +214,3 @@ def create_converter(
raise Exception("No output converter found or set.")
return converter
def generate_model_description(model: Type[BaseModel]) -> str:
"""
Generate a string description of a Pydantic model's fields and their types.
This function takes a Pydantic model class and returns a string that describes
the model's fields and their respective types. The description includes handling
of complex types such as `Optional`, `List`, and `Dict`, as well as nested Pydantic
models.
"""
def describe_field(field_type):
origin = get_origin(field_type)
args = get_args(field_type)
if origin is Union and type(None) in args:
non_none_args = [arg for arg in args if arg is not type(None)]
return f"Optional[{describe_field(non_none_args[0])}]"
elif origin is list:
return f"List[{describe_field(args[0])}]"
elif origin is dict:
key_type = describe_field(args[0])
value_type = describe_field(args[1])
return f"Dict[{key_type}, {value_type}]"
elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
return generate_model_description(field_type)
else:
return field_type.__name__
fields = model.__annotations__
field_descriptions = [
f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
]
return "{\n " + ",\n ".join(field_descriptions) + "\n}"

View File

@@ -3,19 +3,20 @@
import os
from unittest import mock
from unittest.mock import patch
import pytest
from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserException
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.tools import tool
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.tools.tool_usage_events import ToolUsageFinished
from crewai.utilities import RPMController
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.utilities.events import Emitter
@@ -1583,22 +1584,21 @@ def test_agent_with_knowledge_sources():
string_source = StringKnowledgeSource(
content=content, metadata={"preference": "personal"}
)
with patch(
"crewai.knowledge.storage.knowledge_storage.KnowledgeStorage"
) as MockKnowledge:
with patch('crewai.knowledge.storage.knowledge_storage.KnowledgeStorage') as MockKnowledge:
mock_knowledge_instance = MockKnowledge.return_value
mock_knowledge_instance.sources = [string_source]
mock_knowledge_instance.query.return_value = [
{"content": content, "metadata": {"preference": "personal"}}
]
mock_knowledge_instance.query.return_value = [{
"content": content,
"metadata": {"preference": "personal"}
}]
agent = Agent(
role="Information Agent",
goal="Provide information based on knowledge sources",
backstory="You have access to specific knowledge sources.",
llm=LLM(model="gpt-4o-mini"),
knowledge_sources=[string_source],
)
# Create a task that requires the agent to use the knowledge
@@ -1613,3 +1613,4 @@ def test_agent_with_knowledge_sources():
# Assert that the agent provides the correct information
assert "blue" in result.raw.lower()

View File

@@ -1,415 +1,4 @@
interactions:
- request:
body: '{"input": ["Brandon''s favorite color is blue and he likes Mexican food."],
"model": "text-embedding-3-small", "encoding_format": "base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '138'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.9
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6SxOyurrmfP+KVWtKn5KLkGTNuIuABAFRu7q6QJGLIAKSQE6d/96l367T3RMH
EEpI8j63N//5r7/++rvPm+L2+fufv/5u6+nz9//4Xrtnn+zvf/76n//666+//vrP3+//N7Lo8uJ+
r1/lb/jvZv26F8vf//zF//eV/zvon7/+jq6vjKh9O4J16w0pDD6Sje2RqsYS44hDcWl9cPKQzIYP
H8KK5NDZkVu+3eVi0B0ntNmvzxlIl3fM8DMLoM+bIy5Ux2e0iE4p2g/aRIr0JXu0ntIb1N1dFLDx
vowUa0+I7jsjmhXu9AKr4lIODLV4wOaJfUaWM6xD7yWLWAN5O64qLkxZGDl1trNOzJmJMxdZUpbh
AEQuW8BVVtEHpGdsyZmZi5p5NBE+0gc+HuXJmyk6l1ADg42tKQ9y0Xw/fPh63+7EZnXH2OF8HODb
G0ZiuboPWMlffRCXuw/WsMAD5omxiNpX0c7bfPUNcYtoClEq74JJuHfjcnknNjrPtoEjd5FZ9zjC
AVrdxyOH6fUEzJUVBxl740z2mWwCQRrXHp128E5uN6UxlmI1IcJucCR76ebE0t6UAiW/imdsTI9P
vJbUKpCY+SI2z0YdLwN0dODkyCe5qUYetTe0R/PoaiSXjz5Y5wSY0OLiS7A62gxWWYlUsOmVDdkJ
6p3xb28p0et2+BDtVdFmQU5JZe58UcmDNi0j8Bqm6NpxyvxeMx8ICbhPf74n/+y5ZlHOvgnfA7MD
fnPaeWTIqg5VoZrOfaJazaeOjhdErOOeRNNUNnT/5ETITnxGwvfrFbOukCMYDeEb37I7x0ikyTMQ
RqiS073hgQTTJoNR7gREraukkYKqjZDw4iNi4EUASxTJIfi+Dy4CIcqlzUd6wpFuP/g2hed8ZUvl
ImSmhwBpWwDmy92BSnD3pBmMYW7wz4Wm8JiNJ2ycGziyOAo7NFwNOG/GNI1Fa/IdMPZjSCzmXWM2
XbcBykOSEi+vroy5Lc+hoTpe8f1xVwDdP0URzdltxfErujMeKFoK92QKZkELhpH2e/MJJXg1sc1h
f+TpCnz45CoZBwduGNlukp/w8fzYRMWeGovT5ZiBA2fcsNayyiA1Vbaw3h+35FJ2QSPa7l0FjYNR
sGZlbgiZeBsgV8AD0ept7bHdtHTQp71P8kKwgOT1bwWcac8RCzgxY5zjmOhb/9hF8Pv+99BFvirW
RBdO2PjdV+hV90jxfb/nhswZbPHBwdnc2Q1/3SYOOk8VJnGmqkB4j28XlIZQ4WKeNLboB0uBNr+M
OHfNvUdgOmaKogm3AKJM86jkeyqULOOA3Z23b4TbyiWwGqcjuWkGG1cbjiqc9W2LI77Y5TxnNAPS
l7s7z9v9yhbbEQNU05GSfVPHMZPPCUXdLsJEm2bVkPrdncJPs7pEl7n7uF5TYUacPS3kxuucQa/b
mwsWyRzJ+WSWDb0Ogw85zuxJEQhrvJJHMMHU3tbkIgbM+K6/D6mJg5mpOg/WBMISnt8mJSH2ypjd
02UFV32PsG0H9jiFXNDBIg522Iw0G/C6qWaoql9hIH1eptEb1HPh+Z7xZDcXHhO28FgDf7M7EzNZ
ImPZT/0WJjCqiHF/xYC/8+YNlqU/4XQKckNQtixAerZW+EB86C2+gRzltegj1pygNtjKmRBO5/aE
PXYQxvVxhD2Us8dz5sxYbZYeyheY93FDHEWVwezpRIduv1kCoGxgTkMueMJtXj3m6n6/jsO4W2uU
ctGVaNaCWB89/At8noUj9l7v3qDhA1HYRGqAo3F4MjLurwlsnAOaFWojbzrLVgkGQ2/w93kw303e
hJNzU7D33Z/irv/w4Mxbd2LVbZhTqrY+Wo+Yke/8MVaVNx0KZ8/AarA3mYSkt/vbXwSzmAJWzRaE
WwNFWCPxOWYv5NpQKL6rg5Gf8wWdHZC6Z42408M1hMYPXLhB6mfmXN8An/OkZJALQg7nryLJ+dNl
t8IT97ZI5Pv6KMQcr8P47c8kF+yrMafQc+ByhiF5OMfKeDc1jRB0xBfRzivJ+/e1KZC5OccEb0Iv
pp9WGeCzNTUSgavtSZ8NmeQnuRbYEyutYdd8f4PVQsbZlJoyZ9qiDGg7jArZ9a0zCicaTkgqnICc
goM6So9ligBneYiol/t9XL1n6kDbsFOciI4EmKA6HZRFnuCgS7OYmkeVgyJ/5oK2mp1G6AJtRuKw
tX71PFKS1BdoF5ZOsOuasXCoLQ5lYhcEm3u5HUnxuvRQ5/QBm77TNCs6diEK0kjGh8TUYvEqBDXg
dw3B6vZRMipxVx3eNneT3PqCNst12utwpMoHu88aj3/4d9yXQtB7fuMJxlhMIM3VkNyPL+SRbmp8
dNYzhxykvdj86lFxA/4dyOd2y97+eU2Q62UutqcAGFTYbhxwPYsP7J6hnYvv546ibjU7nGxOO4N+
dm8RNuU9w/up2jerfBI4FBoPfqbzW2df/FhBD8twVgrJyufLduKhYT235AoC7AmCcuLQjz+9bdYa
LCxsCJuPqmArEolBTrfJh8Ham8TFG4+x4EVrpFWCiK8xW73P8S5G6HBOeLJ7XWTjM1abJ2zKR0b2
ERximhSxCphsNwEyT6qxXu4qhAfxgYkXPx/xcBXsEup1Qr77TfMEAEoXSafphsO+9YDkyyqFV91D
2LNkIZ4QfVAo+/qFuKos58uhvwYgMZUB+59yMoi1D7agcW8qvlZ8wwTKZzoIFk4IxHlX5OzQGh3S
C6xif653jeAbgou4k+QH0sbkAbmKbQIn4tXEL5idr5xdRei3vvrecPOvHtMh2tEJX6L2BiQQczfI
nTMVa5qDY6qc2hCapeIQ/Oy8mNojuMHWSVziOV7L6O295ZFlPPRAnILce+9eOx1ujU1EDhv76JGZ
rye0X1olWFH0Hmku1StaQHANqPCswCInDwfah5zN4nCSxlmr31uQX/kzdsikGbQhXgbBe/KJ7vt1
Mx1K4kC3Rwv2QfoGbHoNLhA9O8deXslswcJxBenpPM/1STXZCt7aCt6P9Ups6ITGOl2OF1S7/IT3
Q995rPkkHfhczIBkYZ81LEVvCJVr/sZ4yT7N8uNL/pio2FgbM2b7vErQsBW62WukKmaOp98gPvQU
746UH1dasUFZjwdGsiGPGM0cZqIfflivl8eEQ38MYKSdOOLNz5fBskQM4DG2HHLcRp+G5tJA4cZO
LHJqbhfv7VjnLaxje4+1YNfm7GFVNdS74o4xf/JHaZu8eDidX6dZfnduw8pUvaB2M2kk1E5681Gf
7wy6Wd3OG2BJ47I9owJuaXInZ+5wMBa9oSp6JpcVWweejUvwXBX0fqWvmTOOT0CvQ+0j6c27+HLS
K0/yzIP40+fESDgrpgXtHESseE/MC2x//gDCLz/gI4mleDAmoYaJFAfYyCRnFBLtrSrcJ78GpXKg
+Wp6doaWMxfO8EJIzqposMH5Ck9fPdQyUeTeM+yVzY24F073/vAjRrcsAEuyxixnOxWyN3TwowCa
x9/elEcEvlVyxX2RC+tNLEFGVBXvPq+nQfXjmiJtN814N7oPsGizoAMsqD1JoUO95bYvS5i12w35
+iuPTw7QhbZXOsQSTAjYpgsVNNsnk1jqWQdCBLGp/Nbr8Z1fihKgwCDiM3LsGz+mPz3RMreaueOu
ZhOMbAi+84V9HYqAGGWdQUeFJdkJ1wHQGN19+NVPJP/iA9l04RZ2gXcIVplD49xu9k/w9SfYtPQQ
TFna6vIXD3GRXR+MlqKjwo2dWlgFd8tjcenWUFsrK+A5a82JLzsrPImHGIeusAWf0gp5REa0EKfs
5mb96gtAmReT3Z4pBhOL4wUmWE5JCG3GPjHOOCBp3UyyL/5OSzVAcFfgQFQAHuxTxLyLfvW/fyWZ
wdjtzsNrJPkkQN4cU2LTG8LLc49vjyHwlh1aHHR9XKYgUaW78UxkcQvuUu1h9dYfY1Y2a4LUZ9Fi
mz7rZo4Ig2hRGndeY+5t0NO8KBCbDZlFzR+9z0/PHorDik351jA2no0QUn7eYQ/d1FFKyVaF6TLJ
AZxCKR7icFkhON4oca/CGzBzEHV47aBCwjMfgM9eiUQUdaJL9L7ywdI+jhC+7ms1z02HPcnwPz4s
9HI70+C1aT6H+sDBjIJpbp6fblwSrVKRATmC7Xbae8tYSR0q0DnAvrPv4tX0ggsUBWXF9u0hg9Wa
TAfOH22aKd3L8XJIEh0pIZICGCKn+SjrC8JyXxrkV1+rkFkDsPK5+OJ9wliEmx4STRyIfrty46R7
LxP6UuViH4OJzb0lQrCiJ8KHjb0Ys9PiGvpS42JcjVM81cFhgI/EkAg2gBzTodmY8PEk9jySPADC
Dy/KMpjwjq8bY3GCi40sIvozSvkpXuK7qYBfXsDp+B2TXVBSdPRKBSelrMb0MjslFHAn4MAzvEa4
TnsVivbOwXtH8xq2U8YanquywY8dbuPFCUIbaVJ9IWb9wmC98XcHXIHdY/OrFz5zZftIpU6Fo+Ql
AfY6vFXQ9MYLY+dYeZ+V9QNMpGOA3eMW5yy9HraQls8r9rN7wWZcg4uiH98lMUu5jBmHOwr4KZ1m
sNsPBnsbUgEv28nEafKJjekyOzVcdrFGnJTScfnyO3zHvvzFgxqML3JxoQr1dQbpdoyXhWt70KBp
T45W3Hj0c8opPGbv0x/8FHkZi6CUVpm4MBjzKd2EK1SUdUfMNT16C3s0NxhAVGFzTReDlI2S/vT+
zw+wZb1x9c+fBP3SPkdB2YIA6nbPcGDG5bhQ/psnHA4GSbrVaCi/0S/oi1fYkK00nhXepuCnP01j
//nlFeoffZcZqsDWttxS2BuvNhB1JWuW/aWZ4Hs9JcRjh1Oz5kqZgTGoHeyV8z4Wu3t0g0IXF9jX
DNasb0VWlDO/u8+0b0dGq+SiQCjt7mSX59zIfFld0V6KbsRQ9YTRQlwu6Dw1GBuHbTQu751lw6y+
lWQfs9WYBcBT+Oa2PH5o/A18wk17gZy1R0S73+VxDOGDQqepsrn+6oXVnmUb2u0YzjLaReMY2FqG
zlfuFFBfDg1yhdsOJnv1go8oeXvrLgNb2L5uLbmZQhZPfDanym7y6mCm/jamoXncwstHd4hGYilf
HOuxhb551LD9nR+haqoB8Ql9zGUXg3yqQ6WDUe4G2PVFBtZ9ppeovAwFMT6VA+ijrVx08i6QnJAm
xQQ6SwFgUjfE3B+ezQLjz6qU4HCZ0TBrsQjlsoen9PrGxo1Oxvp6zTa88cUl4EQrHyldQfDT77O0
Z5nBnnRfwifsgyD16bahp7CyEf9gHta/+dfg6S8Vqv7BIbiKNvnin9cUWqkGMZbPvUcBE02UFcmC
bd47NQvtevtP/Rxi3RqF7OKJysU4bIl1Uex4ZtlGh226pD89bazVRBxoAoXH3qtIYkHTlRAWw5rM
Xf/qDXaZrjO8QbWd+5PcgzWLzxn45o/ELaQ2JpkcRWizuLfg43DzuJzRMUKb6Vxg+7K1GSlTJ/vp
nz954h88IZlpY1OV7t4snxCEFLQWUbs4jz950tZQ8J30h7dsjtBHgd4phbP8xZ+JHk3zT97JU8yP
7f21pD8/MG8sp2dsyN5P+F2fH54CVoRzDb3w/CYOXzbjRAtRh5rnHcjesqdx+uGnWssL9lV/ZMOi
miWMrm1GvCsb4uGrZ5T98lKwNdQD+zydlwg3yXIk3zwlFsugSKF+TnWiAf3SfIbzCOE3P8G7+1YH
c9HKInS9i0vuVcrlP/xTbvtBI8ZJOBj89UYj+PUHxFfsk8FIBJ6wqNN6Xr75AH3d9yuM1U1EDM/Z
xVT8KM6PL8gjkdw/zwPOCCMc5pt6ZK3a32Cp5voffqHy1ejg8bW1/uQJs3hbO3gJb/08c7rokXog
PTx4wzZYVdx7c4HxDFuirgTPQ51Pe7tTUUVc+uXHt0E39qUDM75Z+ARdMpL7eh9++UcAszsHxovi
9ZALIg7/9uuHNMCFUzmfsbtUVrOOUvwEjw+/w0cIoLFOilrCMqkpwa2oGqK7zROQfzgrAPTuNqtv
5Rxc/DAJlP7TxCy63zuF3ophfrpYNaRrrhVQvSln7BT1OV/iJnSh+zJtHGz3EZBOwXc/36sn/vkL
+tHSDgbtNZmnKTzHC0n3AaDx6Y09x7MYH3J2By3pkuH4sI2atToaEeqekhjU1rFpvvpY/9V3MH/x
e7ot5/q3f4iWo8abL8O6RVe54cnhtFI2teHeh21xaogZdly8Nutehffi9cD+M4bsu17/5q+oFvWG
8qX8hF99jC097Ixl3jxUmL3PxTzPMQ9oeRw5cJiBNvOzh3KKdhqHtlMwYz2An6YDIpvhNx/8g+8M
xFzxy2/IzpvuMZX5VwF/eYMhF02+zvVHhbW8q7H5HF/ewqvvCLatvGBNCo+M78rehFmrbIhbvydv
dcAygFwPXewlSwCeQNmncMcIxe63/7Cc0TWEm367ITvZW0bqP60CXJfNK+D9jZf3C7JFIOCngHH+
EgymPdUJ/fT/1+8Za6ChAaT1s8DOwbKbuRkF+qfedBrHIz10ykWRDsUGa/ONNPR11yhK/EIKWlCV
bJ06LwHUYzlxj9qu+fBqFUEVzhoxqqsyvr/5E0qIsXzzJZwTc+B0eE0yNfjpo6+euMEXf3eJ81z0
fNnCYwm0iOSzEBzKkemhUUNIGhwsp/Jh0G9+98tLyK493sbJGy4T3EFxCaauPRjT+3XK0M8P2unG
yIn2VGcwLFyPMSdx8RRwEoR9KJ6+/BMz6d12Hbg7Txvb5fAZV3GbFmhz30Y4PxyqfBl7nYdD5HA/
PjQ+vutcgLwJK/yA0c4gnnng4YGpdBZ2kdaw2k9FuDkmK/bbuY3Z4Xzt4VzPGPu3Kv2TD0Gef+6J
ulwDY8nEWw8P55SfWz20DSkwkgKam1McCM3tYqyWur+AL37O1TtLDOb17y0cIpeb++5xZF8+N0HF
ogfGUvEcmXfxdHjaJEnw81eLME06/OlFjZYdm++nDMKuywbsJGrbLKW/qaGbZhL++klDUA+bEKgx
SwP+rLgerWqdh6B8ecReo8ggQQxS+Jblilgypg2t1XwG6Q2AWawbCsj4RAHgYMWTn///9ss4+O2P
Eb35uLk4vqseLuvugvdiW46fpdFWCG/caUaq68fSLQtt9NVPxE+e+fjGxWuA3zyFHKqUi//4QXOH
bvj+7U+txKh5ePZJO0uvi+ytKTRcCK3M+fr3eGT7/J2CP36HdXXz1acO8jsJYEcMmDdonkThKLHx
33x5kWz1h9dkp2dlvGR9PSFtN8+BWFzuxsd5Mwi++DYjIzZzgtM+BN+8nHjf/owoS635wxtiCDEZ
yTiqBUKoJcH6JJ3BBFINaOqjZ4CyjWoIgZqKCl66/Q+/wPLLN0svn4lKazsXrSIL5Az7KSmcJY9X
zZNW2BX8LUD06jWrlJIUNoft49tPWvNVM682JFWfEGuoXbBUeCpgf5s6op3KjUffg5v8/CHR19YA
i2vONXBKGwR8RXuP5cfHVzGjKrjMx21D8+uRg0pUj1jP5N744Te8aMkB7z+XJV/uQhMi0TPzWc7p
NK5nQdpC6i15cGblLl9Rf7lA/E6mWf7ll3654aGWi7d51i7aKGbedgKHXjWIOd0PQOh76ENtbSxs
IU3K2WpeInibmUAOdzLGc5m8n1C3B4Z12xljKtzzEP70o688dE/imhGCKjf0WXad3vj5fUCCizsT
tA+AOF2uF/jt1+H7xj4aPGCcCWtuNfEh26ger39SH0aV12PfDmuwHprn+ut3BcJ4XZvuujcoEqP+
TX54wbbjEf70a8B9+8Xffm0NE/8mkW9e5/HS9kC3v/zZIL0VC+HCbuiL7zNnuX38m1+E5keCv3yR
s8DeZ6DvLzXOdt5+pGP3VCE+rg+yM469t66VHiFcKkqgfPO0n/9Gv/5JUCC1EU3PvqC2ODfYmY+X
hixMTmDYcyrO80oGFOxYCg311QZL8ZlzlqIKomN9aDA+wtF7/+rfbt8hDrcPlYlWEQXo1z+9j+3b
WGperSFz2oHsv/ky/X4f4GDDY5Xur9/9uVnhYyvy2AoOarOGLIr+9Ce0JoWA4WcUIIFkD+yfNk9v
CeOhB1HHu/j266/J3rH/zR9+nOYjWKPaVYF7FVLiKRsY08joa/D371TAf/3rr7/+1++EQdffi/Z7
MOBTLJ//+O+jAv8h/cfUZW375xjCPGVl8fc//z6B8Pd77Lv3539/+mfxmv7+5y9B/HPW4O9P/8na
//f6v75/9V//+j8AAAD//wMAOXBqMeAgAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e94839cd9e9967f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 27 Nov 2024 19:27:11 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=XviX9Hjm.Uy8aR.6KFXUsi._PlZSGHz_33BG8yN1gNU-1732735631-1.0.1.1-xpDmkFSh5aO2fugj8VCyrc23NL7wf6Q8eq_yaxcwutJZAO5nSx9Eeqko_4UhxH4IQBfS8cJSaEmHnXWPD6lTJg;
path=/; expires=Wed, 27-Nov-24 19:57:11 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Xz2QlgphZCJYG8KTd5zZKB.lSwPBCu24Nwv2aB6FkeE-1732735631371-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-3-small
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '272'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999986'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5cba1175a36bccbbad92e3ef21b7021d
status:
code: 200
message: OK
- request:
body: '{"input": ["What is Brandon''s favorite color? This is the expect criteria
for your final answer: Brandon''s favorite color. you MUST return the actual
complete content as the final answer, not a summary."], "model": "text-embedding-3-small",
"encoding_format": "base64"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '270'
content-type:
- application/json
cookie:
- __cf_bm=XviX9Hjm.Uy8aR.6KFXUsi._PlZSGHz_33BG8yN1gNU-1732735631-1.0.1.1-xpDmkFSh5aO2fugj8VCyrc23NL7wf6Q8eq_yaxcwutJZAO5nSx9Eeqko_4UhxH4IQBfS8cJSaEmHnXWPD6lTJg;
_cfuvid=Xz2QlgphZCJYG8KTd5zZKB.lSwPBCu24Nwv2aB6FkeE-1732735631371-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.9
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaW6+6Srfm799PsbJu6R05SVWtO84gIIXgATudDiCiICKHKqB29nfv6P/N7u6b
mUwmM0YY4xm/5xn1n//666+/u7wui+nvf/76+/Ucp7//x/faLZuyv//563/+66+//vrrP38//787
yzYvb7fnu/rd/vvj830rl7//+Yv/7yv/96Z//vrbVLoSHzk5BMtWoT1gp3eFzUA0a2ZdPBt1Tqni
qC3bejWaQoT7x7TDxVCH9XwNkgppWjPT01tv/PlmCzGAy97HjkOVehnWOEbb0/tE3fEjAtoiL4RQ
1Tuqe2UNFusgp8hF3ZbGcqkPM55dD3oW14QiPL6M5STtPXg4WB+qSkPoL0FezEr0oQa1QWb7pA21
M/IfsocN8pRrmozbCH4+4wVrougOq6UdI+j2OkftUt36i58tPNr6XYcv5dgl62H7sNHVFrRQvkk6
E98Cb0MzqS/USK92LRjvi4uKZykRpFod621tE8IYKnesXk/nemmyxUNbtkPhKX6eAH+HzYruRgBw
FgbHgaj3voflwJ/oOWCHepWjU4WOY0OxPRJhYHIgR8g/lgcamp5ufH7ft9kUFmHxq2OST9RIObvP
E7aEdcxXWfIyiMb9gP2evcHq3GaC0pyvaIpELl+O3auAPkkRPd2iNZ8d7xkjAB85vWp+BNg4yg20
vGWPjZovfLLcPxFUOINQPNkfRqL4GsPTra+xbjivYV6CTIRBvS8Ia0acME+ORgRV/0JtX7LBzI1x
gcZs2mI3TDbGx2cfDh3FbMKm9Vj9pZXkCj1926b6mzv6y7JLnyhq9xJNx7dvsNB/9uh1rR/Yv+xU
xs9NpSNguSJNl/o08K9ENRFMlhinyf7GVo14Z+h3p56a3tXyRbtrTNh7qx9uww3JV+ZsdEiEXUCA
p0aMvIuHCnzhaRGxVSRAQoFfoRsU55A/jxNb7a6xYXrqSnratDswVHnUI3NGEY6ndWGLz7/FraaV
Gqnj97GeWCrayEr3Nj5kFkrmV+LaaCW2j4scbfxF2TkZmtrnTIRbaNe87FUKOrc2CpkhVMN6Y+cZ
eh8J0b3+dH2RS6EKT+93RIPlMuak1PIWbk9li7WGbQEJi8cIz/F7wf505BhFTqxCFFZHqpouNoSb
karg6Zs2Dje2b/A21J8w0wijxgm+jYVElxaas/j41fPAO14fw+Z5GGis9m1OIw25UEW8hWOrKwb2
wdsQJe/5QB3b1WvpKUc61E7BQDOy8Wu2Owwi/NYTjobUSvi0qUbEKVNKo41a1Xy0eY1wubclNvNG
BeKJ10XUlqcNvvkVlzO69DMkHJ/h2NkdcmkCaQyDa0zDw9k4DPNterqok5eY7nPzaQx8ckyRW7ws
fBTMQyLtek6Bo3Dc40O5KYxVtZUnxI38ouddefcX7sNM9AgkiYDCueQiWrQQprpa02N/3Pm8e91x
0EweFxoUUmmw8tKc0VFMJ5rvL9hYpNGBiLpADOVgdAzGTJ7A1L0cv+8nSKjwvKvQ7wKArZrekqkc
9Bkik1zIcnAcX7g+mxhGReJRFd1UY7mMh1U53pMXDi7aceDxYusI+MGOWvLhMsypP7conxaHXrZ6
ZUi7z1aF1lMzablEDzAjS+8gKCGjxXi/5mt1m1W0N70HAVDWh/U53Voo27xHT6XA1UN2m57gp4fX
+BzUNXdaRmWKVxs7+zpN5g042fAePS1q37gpJ9ymk8E5HC9kVcfYWLqpM0HpBhnNLu7GXy11OUM9
2gfU6ZGVTyfxGaKL8RSJUFOUrGjZBbAj/Aur8zEyZvCuop+e4ov7ngB9ndYUusXbwmq2/9Qz/8kC
kDLzEo6h5Rn8M5R52MksJvnqtQbTx70CP2xhNE6KzCDP6dRAOSstqqu9nfBQ7GWlMSeDiF895aFk
8fATGTsCmrM7iGHYcGjOVhXvJB8n0vMweJAIB0B3Tg/AIghZBdjVHqkPlTBfN++EoFLYLnj/4vtk
Pet9Ay4RR8nC7VsgFUqjoP0x34Tw+/wXZYczqGfGjjrORADrpRqC8tUn2H6Kaj7uH1WJwsp2KC7P
j3ou5NsTSrqXk0LjJ38e+08Ip/l2o7mbm4OEqpcJp5Gs2GGhnn/7t0LYEJdw3usT4K9ZFKNeMxE9
rbctWDeh2qHD/jaR/iHmg3D1LiYsNgql5vqy/XEJYhEN1/SNi+zs+NKmuabw9MjD8J42lE12zMXg
8nBK7CD7Msx7hT2RuVgbfIOsZms04hg4e+5Dw8/WGERw1ER0zY57ihspHebEiM0/+v99nuxThH6K
1ACohMsOXSLct88CHcdCxcVyqgb6KMonLARo0LBvIaN8sjnCQzM1Yf3VI1HSkAh3ub1SYz+dgPDt
P4Bes08PHF7r73zo0NRmGrVcIiczXmwVSSMWcVgQh5GMNC38zlOqK5phMIljBfy9Lxa/XCA5e6lC
t7fb4jsklr/m53sGC/9EaWx1cFgHqihgHrQyfJXL21gPCuEAMgUb2+eXkazNoslINfYrvdjucxDe
lR3/qc8Ad3391dcGxGiHaPkYOYMlx50HN1vzTDam0oFVvHYmbB8bEM5GGPkzy08lMrQrxXF/2yQU
9FkDn/mlCeVX+DCk0SYdhEBzsX21lWRxHjcIh3vzIcr91AzrTplt5NXpjUbWlOfSb15BdXfBp6nO
fWHZRRUonwcRX779LorXyoRekJxC3tBtNpHbNoPJ0mB8y93rsCyA60GAi4U0NHTY8rbTGK5ZGVH/
oZ395TLyIzyLeMRB+26MtfA1GzWD6BH4Mut6oaUUw+/7oOHGPQOW2EEGKpkryVh5DVtpnulwimUB
G/pmmy9tVBGIH9crtu+PiX3MQuWR3116jLNNnYwzFY+I7NcWBzbxGcuGZ4SeRJypq+aeIejtIUbv
6t5Rd3l8ADPpGoDjHAzUIlc9Z3Iwx2ir8QMtVmUEy+vDZqBwGsHOJ2vAPMUiB9PXsqfYhPyw4tu7
RXGND1TbJq4vmMWwwl8/75PJAGu9W234QDMkgNI4kcSdAeF75+jhvDFlY/HZg0OdM5XUfEu7gV5U
4sKwTWUi/Hj4/fRGUIj7B/3Nb3Ze1AqW5aCQ5axZTPz1G6VyiHPhYSbSQdoRuNk8RLx3ihbMNxvF
0NWZi1XFnYZ5oyge6JxCpUkpL/XCv28zaA/kRr+8VrP1NvLwurQmVtGtMqh371rgnuuVekYRJGsU
wxA25YfSMlY8QwSAb+G96niqmnadLEXoZ5B/t1dsIV/M1xA7MmTi80XV+QzYCELZg+8dXqjx5SFy
vJAM8Msg0N37tQHMuug2IqaT0n3s1fm0PwRHOBusI/XB5P15eekcDF24CbeaP//uN2EpgAU7dIuB
RNc9gRe+7ak+JG098a3Cwzdv1TQMVT+fuKhToTmFr3DrPrGxzkYgwkfva9R36iVfLXV7BGWYFjQ9
tBbgX8McopdStNj2pRbMpp3ocKESpCYN30y8GFYFr43nh9xQYsDGKa/g8e6KWMU3LRFGrYDg3/1o
hoOYOZsR5sA+0NKiJGdffQP9qur4YIqeIYALteGaUYJN9fWuV3ZKeViUqKeHzyQPC2RdA8X3NcW6
XD6H6WZEOvryPjUzrcoXWm7iH99Q3WpePtHhU//xSXgr9HT47LjTGYnGcqNaRfWcPw4mD6zjpIQC
j30g7Q6uDA9yOFO9E9/5CndLBsMYjOGK0kc+X9lwhNzuKVCPbPxByNu0AV9eCuvNxCcfWm4i2F7k
OzX7M8+W6Dj3yJN2Dtad3SGZn+HMw3kwSuoiscwXafBseCDZig2XST7ty1SGx7g3sCvDzTA9L+8Z
nuNzQv0XODI2PTsPcEZo0n2Q3RL2PKgB5G669u2/zpgLaW1gfH+daDY2qTG/xtwDd8MMfjzgrx5A
HBTGuiXbZDsZPV/PFaryzxEfwv49THnLntAy3z4O7fcjXxzXk+H3/7967LLlXYWxYplFgbVmIcaK
zeIJbnxzxzuhI2BIpmMA0+1DImttCgm15pMLovgFsdFpGlg/MaggOaRPXIbqkLMJgED+8bB34eyc
XVQ/VH48FNzjZVgX+ZD9mW+WdJEHFnW9CtvhmVH9GkmASa8ug1+/HQLJhcm868wGJu/1gLEJjwOf
mHcI7MSWqcZfd8mcGJkN+WnrkXZXbgyWvnMd/D7vUG8zRhYZnqHcRAaOBR8wOsihDP0yDkPus62H
1agXBf708Os/vv4h0tEnyPd4vy1eNT1EhQgy2DYUf5ZL8meeXrPzHmvsIRprsOljUMRHF2epbOWi
mPkqkF7zGe+6ewnmYJBlMHovL9y6VZ1QNppPJSGOTnduo7D5OJgipOjoUO0uUWM95/EIQQcqrEuN
MfCPDLu/+sI72zDYvLw8DhUbmeJ7I6U1Pa9hBo2T0RNUqAFg4syN8M7ZI1Fua2OsCWUtbF/nNASh
XDPxUM0Q/fjbwdwAmjPZejBiDw1fvnozJ8XkwZARnV7kQGbDYFUr/PF5KL9Vn1fZzoTFs5Dorlfi
XAgPNIY/Xryz1xEsT67o4d67x9gpRzcfA4tAyD/SC83zfZr3pza0YQZpQlW51hLhZdgy1O3oju2j
a/oizSGE4iX+hNv60eVTvGYcrO39OZTWM84Z6FEFp/l+o6b4PjDSqvQJvzwYbufRzJcIqgX68aS9
1bjk8/pEPKyTZ0VN87Mx1q/fRfvLWlDMHaxakLefFcKHXZBNkKyA7fqEg5E74JDzBhkwzcxd4Em+
Q8C1rZKZPMoeVq9jjctVCH1JcB/qH971JbfIF4UezqjbrwvZHPwXmF+DHMBqnQtsrbcto40j9JBP
YIl3OjkkAksTD5WvLqHRJJhM2Cq0g/tHm1HVCJrkj99dl83r933yUVFZCqvXLaa5Uy/Jui1RCD10
k0OlnZXkO18JnEbpQS3kn3P2yEELHlKj0PzbT2x+x2cI/KOMteBwZ0sKjinwOzP49q+f86LNt4r4
MN94l8cXn4BjM8PDqhJ868nHp+G6lvA1LykuUtbWKwCHDm1pHoegS/mELcKqwvYw3qijhQb49LXS
Qv38QHj/EUWDYPP4RHyWc4S3HrExX6ikw9dNn6jhsovxy9ugYPkaocGJqxc93RQwOXpnaj5RD5ha
VEeU0SQLZSSWyeyJ2hF+9FdEtd3ssnFuKhVW+XDEO2mf//zaEUaf9hhuv/5Pcg3TQ87rgkmSqeWw
ZmTXg3t/1KmtVh1bV6ewof9QPGzviiTh9Q/3BLp0TrAxArX++jMRcm/nFnLFzAYWkCGA6QtAUtb7
GCw+T3n44+v4FKg5I17MweGavb/5EjRGLunPsBDtFYfZoctniQQpRJtRwjmd7JqpOKwgubRNKM2M
sMULBPPH16TVLcHojGgolZS+PfI5stH4+nsbvploYV94NMnYLdYRjsJ5T6rooiai4NdHODuuRbGf
b2qaKxoH98LFDO0wRWAezXsEJBZgeiHHepjrjojw61+wHyp0WL88AL75Vfj65nnMUPQeHbHq4ETL
Jn/piKGiLy/TUJv0gb8GYgHNJOvozmmbhP0+r5YOHr42+Tuf9Q9XwVa4OXTPpZEh5Ju6g/zR2WOv
nZV8PXJ5DA9NI2CXlzvjW69H9LqpEw2VMDWWHXc7Kz9/oyreNl+NRlrBfVePpFkmZtw6m4UgV8Mq
7HrPHySH35VKmm8wdYrDka1oxSb0NYvHfrXxcnLHIIbF6FrUq7BRM0QeImqeLqTBh9Nz0dakEHZy
X1Pn3ZfDYm/VAPTe7NMgO7/9mbNeLRTS7UCd5rkAohH9qLyrW0exqvYG2zfZDL95cUhOVyFZbbES
EUVnh+rTbBsSdy56cOhs7ad/hnArahtyZ45gqx4sNivXoQJom9r0ui/2QMwcaVQoqzxazplSM+TE
OnijXqeWMvrf/EZL0bfffvyfSMjJdKi825Ba7BSwxamYin55cL4GB7bcY9QqIO10fJGDlK22JgUQ
lKJJvV3k+WJfzRHsHFr+8orhlzduXb27Ev6wUmOR4isHg/u7CuUn2ydsOtlH8PmQCxGa91zP7nrS
oXoWJqyXnliT/pEEf/TBG7p9It03F/NPPaLDEfof2NozLMaYxxq7ewnF7JRC/0hpKKRnNqzumGaw
0kiH9/PTZu2Zf83QRf02lHj3kQ/318eEvKDH2IhfLvtYhzn9w+96gLCxfMjEQTsxZfzl8YQv5FMF
O7mr6a7JnZx1WdOClO3tbz3U9RhtphEuenIiYn5/Gn/yiuVT+2Tzjkdj3U1lBMEpeYXcxh6M+YIi
EU1tNeOfP18e54cCz3T1Kf7q2S+/QnvhZOLd+3Vny/4DS3ASjB1ZyPuSzK2iiD9/EM7eWzFm8264
qDSn8I/fX7GxbYGcFRY9hyli5Jt/QFfvr9gEj3lYVXutgO91Cb7nhgLGK1MzeMg6FyeX+7tezqXE
wWPK8TgsZlaTV8oTcL+pMtVL7zxMDf/2lOWTFlhttHdOXiteYdJc+VDBVPDXZml4SA7ZE+8LXR66
LD+78LmWI3UPrcXWzxg2kEHpTIOoOSWsePAz3GmeTeD391U/ZQE0ymZHz1f9CdaptTIkR6tElmNp
JfypOhTocnEXsj++n4zB7S2EsnGbQp5Tc8D377sIhbhR6KXfLMZv3wEHax7pbk1gwurtSYbXqIrw
7hIN+eqOUQom7HnY06TKYNZ88iBPtC01DrqbzPwnDhTQrQs2dzViU+z7LqBFvYTvcuzy1YnOMhgk
hyOMFZdkdS21RT7gRWyRnBij4z0j+NVPfOYWM18Al0Po6kAi1Ll0xuxeNQiA9ulxoCQi+FjmuUV4
z24hnezd1++7PGw33imUEz/214MSqODCZe9QENYgX4U1DeAj+NYvSrX8l3fLwD/LWO2fpbEcO71C
uz3rqG5qTU0Kf9Sh5B5v9Ld/WM7lSf3pb/j4zvs/vClfrI5+87VE0gZVRYeGNmT0ZmlY7nHcwcl6
D1j75n2ren92QIoPHrYGE4PVKfP+D//iiml+t13zHshcSL/1niZrDVCs7OTnEE4VvOTUfj1mcDg4
H+wVymIsm4MOoWzcJ/zL0+a+hQq43O5PatySVz4tz7oBOy0WiPIcdcAnxctTfvnsrlfW5E++8C4I
wvsKSvl4r2EPm6e6p0fwiAbRaDarYuP3gA1SaoakFDcRJqAC2C6umiG89Boi9tnnNBYcM5fCBhHQ
r+5CHV/Xkt8+YKuepQk7n2Ie5msWRT++/PESWPpaaaAblGfqB3fozxLUPGRXqR7O+L43WOMIHfTu
IML7c6gwejxhHTZlesA72XgOYyEpDTRgUuFgVUa2lOZNhoQTM9IluzZfKtcLlWu0EGqfX3VOu0zr
0LnwImo0BBizeG9GtJy2ETVntwXLhK8EZndvoHrpekBYQE5gQiydqo2RGBIbzQqKO36mh5P/b96B
mrw5UX9+W8O6VmYIvnpE02/eNnx58c/z9JtzNzSa7q/w2kQKtj4P/K3vhIP3XYq++y/69XfEg0ex
LsKNqvb+4tJshDoaTIpzI2OEUw35l3dRYxt+8l8+BYe5L4nwuRs1PyNgw03e30KkNwmb1WDjgXJT
Rli/hqO/vlI4wjqpql8+nSzhXTPB5yPYf3hysWMxUr7zGtve0U7EUwBU8Nv/ubwq+GtnyArK38UZ
7yGrweJS1MFpPhNsDVBndFZfJQzxYSbbcyony6gqKgxxMmNnKmHOGvKswDev+O7/rEECPXrCctNu
qJahOV8e7uTBVV308HFLTZ+f4qMK34UwYlyj7UCeR72CaEMkrJmyAFh2e1VQsHYadi6XE6Cn2ljR
d/8UVviRAvbdb6KwCsQ/ebzIHEmF+40aU1V+7Q3GnbYEqOcWf/1A+W99+voN7KWylZB9qKswdMWI
qht9TBizLh3Ye+cau7b8ydnJyzv41R9sSvcGzD8/7aHTjkST0IDhu39VNptaJO/fPBUv6oigIl+p
fxy1WsjbqIHwYRZf3hj8oZc0FelXOcMnh9/m62/fsNnure/+McwXa/AL8N2X/clj6VACEXzrC5/k
gzTQildX+GGD+eVFMxHmoy8rbk321K7yEbBNc8hgqus11me8JJ++XtvffhLfaOcwAaCtDj6RtsN7
2r3BfHIeZ5RPXYpDVaDDeEfOCO99ufzqxV9tzDj4mLo9xWdnzv/4DS1PJxxGd7FerSxI4d+/UwH/
9a+//vpfvxMGbXcrX9+DAVO5TP/x30cF/kP6j7HNXq8/xxDImFXl3//8+wTC35+haz/T/566pnyP
f//zl4T+nDX4e+qm7PX/Xv/X96P+61//BwAA//8DAFT8PaLgIAAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e9483a10d7c967f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 27 Nov 2024 19:27:11 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-allow-origin:
- '*'
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-model:
- text-embedding-3-small
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '68'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '10000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '9999953'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_09708939ca92f32d9d7143e8b7843b12
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Information Agent.
You have access to specific knowledge sources.\nYour personal goal is: Provide
@@ -420,10 +9,9 @@ interactions:
depends on it!"}, {"role": "user", "content": "\nCurrent Task: What is Brandon''s
favorite color?\n\nThis is the expect criteria for your final answer: Brandon''s
favorite color.\nyou MUST return the actual complete content as the final answer,
not a summary.Additional Information: Brandon''s favorite color is blue and
he likes Mexican food.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
@@ -432,7 +20,7 @@ interactions:
connection:
- keep-alive
content-length:
- '1014'
- '931'
content-type:
- application/json
host:
@@ -462,19 +50,19 @@ interactions:
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSwY7TMBS85yuefOHSrNJ0l1S5bVdCLHAHBChy7Zf0geNnbGeX1ar/jpxmm1SA
xCVSZt6MZ579nAEI0qIGoQ4yqt6Z/Pbz26efjNXW+0/64917r82H3W64f9fpOxKrpOD9d1TxRXWl
uHcGI7E90cqjjJhc19WmrDY3rzflSPSs0SRZ52J+zXlPlvKyKK/zosrX20l9YFIYRA1fMgCA5/Gb
clqNv0QNxeoF6TEE2aGoz0MAwrNJiJAhUIjSRrGaScU2oh2j34PlR1DSQkcPCBK6FBukDY/oAb7a
N2Slgdvxv4adl1azfRWglQ/sKSIoNuyBAuzNgFfLYzy2Q5Cpqh2MmfDjObfhznneh4k/4y1ZCofG
owxsU8YQ2YmRPWYA38b9DBeVhfPcu9hE/oE2Ga635clPzNeyZCcycpRmxsti2uqlX6MxSjJhsWGh
pDqgnqXzdchBEy+IbNH6zzR/8z41J9v9j/1MKIUuom6cR03qsvE85jG92n+Nnbc8BhbhKUTsm5Zs
h955Or2Z1jVFVdzs222lCpEds98AAAD//wMAfDYBg0EDAAA=
H4sIAAAAAAAAA4xSQW7bMBC86xULXnqxAtmxI1e3FEWBtJekCXJpC4GmVhIdapcgqbhN4L8HlB1L
QVOgFwGa2RnOLPmcAAhdiQKEamVQnTXp5f3d9lsdbndh++C+757Or6/bm6uvn59WH/FGzKKCN1tU
4VV1prizBoNmOtDKoQwYXef5+SK7WK3ny4HouEITZY0N6ZLTTpNOF9limWZ5Ol8f1S1rhV4U8CMB
AHgevjEnVfhbFJDNXpEOvZcNiuI0BCAcm4gI6b32QVIQs5FUTAFpiH4FxDtQkqDRjwgSmhgbJPkd
OoCf9EWTNHA5/BfwyUmqmD54qOUjOx0QFBt2oD1sTI9n02Mc1r2XsSr1xhzx/Sm34cY63vgjf8Jr
Tdq3pUPpmWJGH9iKgd0nAL+G/fRvKgvruLOhDPyAFA3nF/nBT4zXMmHXRzJwkGaKr2bv+JUVBqmN
n2xYKKlarEbpeB2yrzRPiGTS+u8073kfmmtq/sd+JJRCG7AqrcNKq7eNxzGH8dX+a+y05SGw8H98
wK6sNTXorNOHN1PbMsuz1aZe5yoTyT55AQAA//8DAPaYLdRBAwAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e9483a44b2fcf51-SJC
- 8e54a2a7d81467f7-SJC
Connection:
- keep-alive
Content-Encoding:
@@ -482,14 +70,14 @@ interactions:
Content-Type:
- application/json
Date:
- Wed, 27 Nov 2024 19:27:12 GMT
- Wed, 20 Nov 2024 01:23:34 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=pBzYx.9r7fU6srtt2lLWBrgojr5QFAfVuDKoOwUKCK4-1732735632-1.0.1.1-jYgG33D0s.RUVr6OV4fPXS7bQR9Yp5AwbbIAqdxaZCrcisNIYqPqOqxNO9.Lo3Ok7K8FXfSBrrnAOOJDVLa6bA;
path=/; expires=Wed, 27-Nov-24 19:57:12 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=DoHo1Z11nN9bxkwZmJGnaxRhyrWE0UfyimYuUVRU6A4-1732065814-1.0.1.1-JVRvFrIJLHEq9OaFQS0qcgYcawE7t2XQ4Tpqd58n2Yfx3mvEqD34MJmooi1LtvdvjB2J8x1Rs.rCdXD.msLlKw;
path=/; expires=Wed, 20-Nov-24 01:53:34 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=TYAi3OpktKJu15t1e4y3VbRnbHK6QYaCeSYJuT6e5Sk-1732735632634-0.0.1.1-604800000;
- _cfuvid=n3RrNhFMqC3HtJ7n3e3agyxnM1YOQ6eKESz_eeXLtZA-1732065814630-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -502,7 +90,7 @@ interactions:
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '535'
- '344'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -514,13 +102,13 @@ interactions:
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999769'
- '149999790'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8501f29c09575f05c51fdec5c1c36090
- req_8f1622677c64913753a595f679596614
status:
code: 200
message: OK

View File

@@ -43,11 +43,10 @@ class TestAuthenticationCommand(unittest.TestCase):
mock_print.assert_any_call("2. Enter the following code: ", "ABCDEF")
mock_open.assert_called_once_with("https://example.com")
@patch("crewai.cli.authentication.main.ToolCommand")
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.validate_token")
@patch("crewai.cli.authentication.main.console.print")
def test_poll_for_token_success(self, mock_print, mock_validate_token, mock_post, mock_tool):
def test_poll_for_token_success(self, mock_print, mock_validate_token, mock_post):
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
@@ -56,13 +55,10 @@ class TestAuthenticationCommand(unittest.TestCase):
}
mock_post.return_value = mock_response
mock_instance = mock_tool.return_value
mock_instance.login.return_value = None
self.auth_command._poll_for_token({"device_code": "123456"})
mock_validate_token.assert_called_once_with("TOKEN")
mock_print.assert_called_once_with("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")
mock_print.assert_called_once_with("\nWelcome to CrewAI+ !!", style="green")
@patch("crewai.cli.authentication.main.requests.post")
@patch("crewai.cli.authentication.main.console.print")

View File

@@ -260,6 +260,6 @@ class TestDeployCommand(unittest.TestCase):
self.assertEqual(project_name, "test_project")
def test_get_crewai_version(self):
from crewai.cli.version import get_crewai_version
from crewai.cli.utils import get_crewai_version
assert isinstance(get_crewai_version(), str)

View File

@@ -1,5 +1,4 @@
from typing import Callable
from crewai.tools import BaseTool, tool
@@ -22,7 +21,8 @@ def test_creating_a_tool_using_annotation():
my_tool.func("What is the meaning of life?") == "What is the meaning of life?"
)
converted_tool = my_tool.to_structured_tool()
# Assert the langchain tool conversion worked as expected
converted_tool = my_tool.to_langchain()
assert converted_tool.name == "Name of my tool"
assert (
@@ -41,7 +41,9 @@ def test_creating_a_tool_using_annotation():
def test_creating_a_tool_using_baseclass():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
def _run(self, question: str) -> str:
return question
@@ -59,7 +61,8 @@ def test_creating_a_tool_using_baseclass():
}
assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?"
converted_tool = my_tool.to_structured_tool()
# Assert the langchain tool conversion worked as expected
converted_tool = my_tool.to_langchain()
assert converted_tool.name == "Name of my tool"
assert (
@@ -70,7 +73,7 @@ def test_creating_a_tool_using_baseclass():
"question": {"title": "Question", "type": "string"}
}
assert (
converted_tool._run("What is the meaning of life?")
converted_tool.run("What is the meaning of life?")
== "What is the meaning of life?"
)
@@ -78,7 +81,9 @@ def test_creating_a_tool_using_baseclass():
def test_setting_cache_function():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
cache_function: Callable = lambda: False
def _run(self, question: str) -> str:
@@ -92,7 +97,9 @@ def test_setting_cache_function():
def test_default_cache_function_is_true():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
def _run(self, question: str) -> str:
return question

View File

@@ -1,146 +0,0 @@
from typing import Optional
import pytest
from pydantic import BaseModel, Field
from crewai.tools.structured_tool import CrewStructuredTool
# Test fixtures
@pytest.fixture
def basic_function():
def test_func(param1: str, param2: int = 0) -> str:
"""Test function with basic params."""
return f"{param1} {param2}"
return test_func
@pytest.fixture
def schema_class():
class TestSchema(BaseModel):
param1: str
param2: int = Field(default=0)
return TestSchema
class TestCrewStructuredTool:
def test_initialization(self, basic_function, schema_class):
"""Test basic initialization of CrewStructuredTool"""
tool = CrewStructuredTool(
name="test_tool",
description="Test tool description",
func=basic_function,
args_schema=schema_class,
)
assert tool.name == "test_tool"
assert tool.description == "Test tool description"
assert tool.func == basic_function
assert tool.args_schema == schema_class
def test_from_function(self, basic_function):
"""Test creating tool from function"""
tool = CrewStructuredTool.from_function(
func=basic_function, name="test_tool", description="Test description"
)
assert tool.name == "test_tool"
assert tool.description == "Test description"
assert tool.func == basic_function
assert isinstance(tool.args_schema, type(BaseModel))
def test_validate_function_signature(self, basic_function, schema_class):
"""Test function signature validation"""
tool = CrewStructuredTool(
name="test_tool",
description="Test tool",
func=basic_function,
args_schema=schema_class,
)
# Should not raise any exceptions
tool._validate_function_signature()
@pytest.mark.asyncio
async def test_ainvoke(self, basic_function):
"""Test asynchronous invocation"""
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
result = await tool.ainvoke(input={"param1": "test"})
assert result == "test 0"
def test_parse_args_dict(self, basic_function):
"""Test parsing dictionary arguments"""
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
parsed = tool._parse_args({"param1": "test", "param2": 42})
assert parsed["param1"] == "test"
assert parsed["param2"] == 42
def test_parse_args_string(self, basic_function):
"""Test parsing string arguments"""
tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")
parsed = tool._parse_args('{"param1": "test", "param2": 42}')
assert parsed["param1"] == "test"
assert parsed["param2"] == 42
def test_complex_types(self):
"""Test handling of complex parameter types"""
def complex_func(nested: dict, items: list) -> str:
"""Process complex types."""
return f"Processed {len(items)} items with {len(nested)} nested keys"
tool = CrewStructuredTool.from_function(
func=complex_func, name="test_tool", description="Test complex types"
)
result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
assert result == "Processed 3 items with 1 nested keys"
def test_schema_inheritance(self):
    """A tool accepts an args schema that inherits fields from a base model."""

    def extended_func(base_param: str, extra_param: int) -> str:
        """Test function with inherited schema."""
        return f"{base_param} {extra_param}"

    class BaseSchema(BaseModel):
        base_param: str

    class ExtendedSchema(BaseSchema):
        extra_param: int

    structured_tool = CrewStructuredTool.from_function(
        func=extended_func, name="test_tool", args_schema=ExtendedSchema
    )
    # Fields from both the base and the extension must be accepted.
    outcome = structured_tool.invoke({"base_param": "test", "extra_param": 42})
    assert outcome == "test 42"
def test_default_values_in_schema(self):
    """Schema defaults apply when omitted and are overridden when supplied."""

    def default_func(
        required_param: str,
        optional_param: str = "default",
        nullable_param: Optional[int] = None,
    ) -> str:
        """Test function with default values."""
        return f"{required_param} {optional_param} {nullable_param}"

    structured_tool = CrewStructuredTool.from_function(
        func=default_func, name="test_tool", description="Test defaults"
    )

    # Omitting optional parameters falls back to the declared defaults.
    assert structured_tool.invoke({"required_param": "test"}) == "test default None"

    # Supplying every parameter overrides the defaults.
    full_args = {
        "required_param": "test",
        "optional_param": "custom",
        "nullable_param": 42,
    }
    assert structured_tool.invoke(full_args) == "test custom 42"

View File

@@ -1,10 +1,7 @@
import json
from typing import Dict, List, Optional
from unittest.mock import MagicMock, Mock, patch
import pytest
from pydantic import BaseModel
from crewai.llm import LLM
from crewai.utilities.converter import (
Converter,
@@ -12,11 +9,12 @@ from crewai.utilities.converter import (
convert_to_model,
convert_with_instructions,
create_converter,
generate_model_description,
get_conversion_instructions,
handle_partial_json,
validate_model,
)
from pydantic import BaseModel
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -271,45 +269,3 @@ def test_create_converter_fails_without_agent_or_converter_cls():
create_converter(
llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
)
def test_generate_model_description_simple_model():
    """A flat model is described as a JSON-like listing of field: type pairs."""
    rendered = generate_model_description(SimpleModel)
    assert rendered == '{\n "name": str,\n "age": int\n}'
def test_generate_model_description_nested_model():
    """A nested model's fields are expanded inline in the description."""
    rendered = generate_model_description(NestedModel)
    expected = '{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'
    assert rendered == expected
def test_generate_model_description_optional_field():
    """Optional fields are rendered with an Optional[...] wrapper."""

    class ModelWithOptionalField(BaseModel):
        name: Optional[str]
        age: int

    rendered = generate_model_description(ModelWithOptionalField)
    assert rendered == '{\n "name": Optional[str],\n "age": int\n}'
def test_generate_model_description_list_field():
    """List-typed fields are rendered with a List[...] wrapper."""

    class ModelWithListField(BaseModel):
        items: List[int]

    rendered = generate_model_description(ModelWithListField)
    assert rendered == '{\n "items": List[int]\n}'
def test_generate_model_description_dict_field():
    """Dict-typed fields are rendered with a Dict[key, value] wrapper."""

    class ModelWithDictField(BaseModel):
        attributes: Dict[str, int]

    rendered = generate_model_description(ModelWithDictField)
    assert rendered == '{\n "attributes": Dict[str, int]\n}'

15
uv.lock generated
View File

@@ -608,7 +608,7 @@ wheels = [
[[package]]
name = "crewai"
version = "0.83.0"
version = "0.80.0"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },
@@ -619,13 +619,12 @@ dependencies = [
{ name = "instructor" },
{ name = "json-repair" },
{ name = "jsonref" },
{ name = "langchain" },
{ name = "litellm" },
{ name = "openai" },
{ name = "openpyxl" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
{ name = "opentelemetry-sdk" },
{ name = "pdfplumber" },
{ name = "pydantic" },
{ name = "python-dotenv" },
{ name = "pyvis" },
@@ -642,9 +641,6 @@ agentops = [
fastembed = [
{ name = "fastembed" },
]
mem0 = [
{ name = "mem0ai" },
]
openpyxl = [
{ name = "openpyxl" },
]
@@ -654,6 +650,9 @@ pandas = [
pdfplumber = [
{ name = "pdfplumber" },
]
mem0 = [
{ name = "mem0ai" },
]
tools = [
{ name = "crewai-tools" },
]
@@ -691,16 +690,15 @@ requires-dist = [
{ name = "instructor", specifier = ">=1.3.3" },
{ name = "json-repair", specifier = ">=0.25.2" },
{ name = "jsonref", specifier = ">=1.1.0" },
{ name = "langchain", specifier = ">=0.2.16" },
{ name = "litellm", specifier = ">=1.44.22" },
{ name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=0.1.29" },
{ name = "openai", specifier = ">=1.13.3" },
{ name = "openpyxl", specifier = ">=3.1.5" },
{ name = "openpyxl", marker = "extra == 'openpyxl'", specifier = ">=3.1.5" },
{ name = "opentelemetry-api", specifier = ">=1.22.0" },
{ name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.22.0" },
{ name = "opentelemetry-sdk", specifier = ">=1.22.0" },
{ name = "pandas", marker = "extra == 'pandas'", specifier = ">=2.2.3" },
{ name = "pdfplumber", specifier = ">=0.11.4" },
{ name = "pdfplumber", marker = "extra == 'pdfplumber'", specifier = ">=0.11.4" },
{ name = "pydantic", specifier = ">=2.4.2" },
{ name = "python-dotenv", specifier = ">=1.0.0" },
@@ -954,6 +952,7 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c1/8b/5fe2cc11fee489817272089c4203e679c63b570a5aaeb18d852ae3cbba6a/et_xmlfile-2.0.0-py3-none-any.whl", hash = "sha256:7a91720bc756843502c3b7504c77b8fe44217c85c537d85037f0f536151b2caa", size = 18059 },
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"