Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-02-20 04:48:18 +00:00

Compare commits: v0.83.0 ... add/agent- (26 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ea6d04a9d9 | |
| | a81200a020 | |
| | 61fe1c69d9 | |
| | 3eb52dad9f | |
| | 87e9a0c91a | |
| | 24d2d9cd55 | |
| | 85b8d2af6f | |
| | 5b03d6c8bc | |
| | 366bbbbea3 | |
| | 3f87bf3ada | |
| | b3deac2a2b | |
| | 293305790d | |
| | 95f2e9eded | |
| | 707c50b833 | |
| | 8bc09eb054 | |
| | db1b678c3a | |
| | 6f32bf52cc | |
| | 49d173a02d | |
| | 4069b621d5 | |
| | a21feda2cc | |
| | 15d549e157 | |
| | 74d681f3af | |
| | 6c6c60318c | |
| | a7147c99c6 | |
| | 6fe308202e | |
| | 63ecb7395d | |

.github/workflows/linter.yml (vendored, 2 changes)

@@ -6,7 +6,7 @@ jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/checkout@v3
+     - uses: actions/checkout@v4

      - name: Install Requirements
        run: |

.github/workflows/mkdocs.yml (vendored, 8 changes)

@@ -13,10 +13,10 @@ jobs:

    steps:
      - name: Checkout code
-       uses: actions/checkout@v2
+       uses: actions/checkout@v4

      - name: Setup Python
-       uses: actions/setup-python@v4
+       uses: actions/setup-python@v5
        with:
          python-version: '3.10'

@@ -25,7 +25,7 @@ jobs:
        run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"

      - name: Setup cache
-       uses: actions/cache@v3
+       uses: actions/cache@v4
        with:
          key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
          path: .cache

@@ -42,4 +42,4 @@ jobs:
          GH_TOKEN: ${{ secrets.GH_TOKEN }}

      - name: Build and deploy MkDocs
-       run: mkdocs gh-deploy --force
+       run: mkdocs gh-deploy --force

.github/workflows/security-checker.yml (vendored, 2 changes)

@@ -11,7 +11,7 @@ jobs:
        uses: actions/checkout@v4

      - name: Set up Python
-       uses: actions/setup-python@v4
+       uses: actions/setup-python@v5
        with:
          python-version: "3.11.9"

.github/workflows/type-checker.yml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
        uses: actions/checkout@v4

      - name: Setup Python
-       uses: actions/setup-python@v4
+       uses: actions/setup-python@v5
        with:
          python-version: "3.11.9"

@@ -121,7 +121,7 @@ researcher:
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst

@@ -205,7 +205,7 @@ class LatestAiDevelopmentCrew():
            tasks=self.tasks, # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
        )
```

**main.py**

@@ -357,7 +357,7 @@ uv run pytest .
### Running static type checks

```bash
-uvx mypy
+uvx mypy src
```

### Packaging

@@ -51,12 +51,41 @@ crew = Crew(
    tasks=[task],
    verbose=True,
    process=Process.sequential,
-   knowledge={"sources": [string_source], "metadata": {"preference": "personal"}}, # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
+   knowledge_sources=[string_source], # Enable knowledge by adding the sources here.
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```

## Appending Knowledge Sources To Individual Agents

Sometimes you may want to append knowledge sources to an individual agent. You can do this by setting the `knowledge_sources` parameter in the `Agent` class.

```python
agent = Agent(
    ...
    knowledge_sources=[
        StringKnowledgeSource(
            content="Users name is John. He is 30 years old and lives in San Francisco.",
            metadata={"preference": "personal"},
        )
    ],
)
```

## Agent Level Knowledge Sources

You can also append knowledge sources to an individual agent by setting the `knowledge_sources` parameter in the `Agent` class.

```python
string_source = StringKnowledgeSource(
    content="Users name is John. He is 30 years old and lives in San Francisco.",
    metadata={"preference": "personal"},
)
agent = Agent(
    ...
    knowledge_sources=[string_source],
)
```

## Embedder Configuration

@@ -70,10 +99,7 @@ string_source = StringKnowledgeSource(
)
crew = Crew(
    ...
-   knowledge={
-       "sources": [string_source],
-       "metadata": {"preference": "personal"},
-       "embedder_config": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
-   },
+   knowledge_sources=[string_source],
+   embedder_config={"provider": "ollama", "config": {"model": "nomic-embed-text:latest"}},
)
```
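
Tying the snippets above together, here is a minimal end-to-end sketch of the crew-level API as these docs describe it. The `StringKnowledgeSource` import path is the one used elsewhere in this changeset; the agent and task wording is illustrative only.

```python
from crewai import Agent, Crew, Process, Task
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

string_source = StringKnowledgeSource(
    content="Users name is John. He is 30 years old and lives in San Francisco.",
    metadata={"preference": "personal"},
)

agent = Agent(
    role="About User",
    goal="Answer questions about the user.",
    backstory="You know everything about the user.",
)
task = Task(
    description="Answer this question about the user: {question}",
    expected_output="A short factual answer.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    process=Process.sequential,
    knowledge_sources=[string_source],  # crew-level knowledge, per this change
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```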

@@ -68,6 +68,7 @@
      "concepts/tasks",
      "concepts/crews",
      "concepts/flows",
+     "concepts/knowledge",
      "concepts/llms",
      "concepts/processes",
      "concepts/collaboration",

@@ -9,7 +9,6 @@ authors = [
]
dependencies = [
    "pydantic>=2.4.2",
-   "langchain>=0.2.16",
    "openai>=1.13.3",
    "opentelemetry-api>=1.22.0",
    "opentelemetry-sdk>=1.22.0",

@@ -1,7 +1,7 @@
import os
import shutil
import subprocess
-from typing import Any, List, Literal, Optional, Union
+from typing import Any, List, Literal, Optional, Union, Dict

from pydantic import Field, InstanceOf, PrivateAttr, model_validator

@@ -10,13 +10,18 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS
from crewai.llm import LLM
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context


def mock_agent_ops_provider():

@@ -63,6 +68,7 @@ class Agent(BaseAgent):
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at agents disposal
        step_callback: Callback to be executed after each step of the agent execution.
+       knowledge_sources: Knowledge sources for the agent.
    """

    _times_executed: int = PrivateAttr(default=0)

@@ -120,11 +126,23 @@ class Agent(BaseAgent):
        default="safe",
        description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
    )
+   embedder_config: Optional[Dict[str, Any]] = Field(
+       default=None,
+       description="Embedder configuration for the agent.",
+   )
+   knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
+       default=None,
+       description="Knowledge sources for the agent.",
+   )
+   _knowledge: Optional[Knowledge] = PrivateAttr(
+       default=None,
+   )

    @model_validator(mode="after")
    def post_init_setup(self):
+       self._set_knowledge()
        self.agent_ops_agent_name = self.role
-       unnacepted_attributes = [
+       unaccepted_attributes = [
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
            "AWS_REGION_NAME",

@@ -158,28 +176,23 @@ class Agent(BaseAgent):
        for provider, env_vars in ENV_VARS.items():
            if provider == set_provider:
                for env_var in env_vars:
-                   if env_var["key_name"] in unnacepted_attributes:
-                       continue
-                   # Check if the environment variable is set
-                   if "key_name" in env_var:
-                       env_value = os.environ.get(env_var["key_name"])
+                   key_name = env_var.get("key_name")
+                   if key_name and key_name not in unaccepted_attributes:
+                       env_value = os.environ.get(key_name)
                        if env_value:
                            # Map key names containing "API_KEY" to "api_key"
                            key_name = (
-                               "api_key"
-                               if "API_KEY" in env_var["key_name"]
-                               else env_var["key_name"]
+                               "api_key" if "API_KEY" in key_name else key_name
                            )
                            # Map key names containing "API_BASE" to "api_base"
                            key_name = (
-                               "api_base"
-                               if "API_BASE" in env_var["key_name"]
-                               else key_name
+                               "api_base" if "API_BASE" in key_name else key_name
                            )
                            # Map key names containing "API_VERSION" to "api_version"
                            key_name = (
                                "api_version"
-                               if "API_VERSION" in env_var["key_name"]
+                               if "API_VERSION" in key_name
                                else key_name
                            )
                            llm_params[key_name] = env_value

@@ -235,9 +248,24 @@ class Agent(BaseAgent):
            self.cache_handler = CacheHandler()
        self.set_cache_handler(self.cache_handler)

+   def _set_knowledge(self):
+       try:
+           if self.knowledge_sources:
+               knowledge_agent_name = f"{self.role.replace(' ', '_')}"
+               if isinstance(self.knowledge_sources, list) and all(
+                   isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
+               ):
+                   self._knowledge = Knowledge(
+                       sources=self.knowledge_sources,
+                       embedder_config=self.embedder_config,
+                       collection_name=knowledge_agent_name,
+                   )
+       except (TypeError, ValueError) as e:
+           raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
+
    def execute_task(
        self,
-       task: Any,
+       task: Task,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:

@@ -256,6 +284,22 @@ class Agent(BaseAgent):

        task_prompt = task.prompt()

+       # If the task requires output in JSON or Pydantic format,
+       # append specific instructions to the task prompt to ensure
+       # that the final answer does not include any code block markers
+       if task.output_json or task.output_pydantic:
+           # Generate the schema based on the output format
+           if task.output_json:
+               # schema = json.dumps(task.output_json, indent=2)
+               schema = generate_model_description(task.output_json)
+
+           elif task.output_pydantic:
+               schema = generate_model_description(task.output_pydantic)
+
+           task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
+               output_format=schema
+           )
+
        if context:
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context

@@ -273,17 +317,21 @@ class Agent(BaseAgent):
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

-       # Integrate the knowledge base
-       if self.crew and self.crew.knowledge:
-           knowledge_snippets = self.crew.knowledge.query([task.prompt()])
-           valid_snippets = [
-               result["context"]
-               for result in knowledge_snippets
-               if result and result.get("context")
-           ]
-           if valid_snippets:
-               formatted_knowledge = "\n".join(valid_snippets)
-               task_prompt += f"\n\nAdditional Information:\n{formatted_knowledge}"
+       if self._knowledge:
+           agent_knowledge_snippets = self._knowledge.query([task.prompt()])
+           if agent_knowledge_snippets:
+               agent_knowledge_context = extract_knowledge_context(
+                   agent_knowledge_snippets
+               )
+               if agent_knowledge_context:
+                   task_prompt += agent_knowledge_context
+
+       if self.crew:
+           knowledge_snippets = self.crew.query_knowledge([task.prompt()])
+           if knowledge_snippets:
+               crew_knowledge_context = extract_knowledge_context(knowledge_snippets)
+               if crew_knowledge_context:
+                   task_prompt += crew_knowledge_context

        tools = tools or self.tools or []
        self.create_agent_executor(tools=tools, task=task)

@@ -399,7 +447,7 @@ class Agent(BaseAgent):

            for tool in tools:
                if isinstance(tool, CrewAITool):
-                   tools_list.append(tool.to_langchain())
+                   tools_list.append(tool.to_structured_tool())
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
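
The new `embedder_config` and `knowledge_sources` fields give each agent its own knowledge collection, named after the agent's role by `_set_knowledge` above. A minimal sketch, assuming the Ollama embedder configuration shown in the docs diff:

```python
from crewai import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

string_source = StringKnowledgeSource(
    content="Users name is John. He is 30 years old and lives in San Francisco.",
    metadata={"preference": "personal"},
)

# The agent builds its own Knowledge collection ("Information_Agent"),
# so its embedder can differ from the crew-wide one.
agent = Agent(
    role="Information Agent",
    goal="Provide information based on knowledge sources",
    backstory="You have access to specific knowledge sources.",
    knowledge_sources=[string_source],
    embedder_config={"provider": "ollama", "config": {"model": "nomic-embed-text:latest"}},
)
```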

@@ -19,6 +19,7 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProces
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
+from crewai.tools.base_tool import Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config

@@ -106,7 +107,7 @@ class BaseAgent(ABC, BaseModel):
        default=False,
        description="Enable agent to delegate and ask questions among each other.",
    )
-   tools: Optional[List[BaseTool]] = Field(
+   tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents' disposal"
    )
    max_iter: Optional[int] = Field(

@@ -135,6 +136,35 @@ class BaseAgent(ABC, BaseModel):
    def process_model_config(cls, values):
        return process_config(values, cls)

+   @field_validator("tools")
+   @classmethod
+   def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
+       """Validate and process the tools provided to the agent.
+
+       This method ensures that each tool is either an instance of BaseTool
+       or an object with 'name', 'func', and 'description' attributes. If the
+       tool meets these criteria, it is processed and added to the list of
+       tools. Otherwise, a ValueError is raised.
+       """
+       processed_tools = []
+       for tool in tools:
+           if isinstance(tool, BaseTool):
+               processed_tools.append(tool)
+           elif (
+               hasattr(tool, "name")
+               and hasattr(tool, "func")
+               and hasattr(tool, "description")
+           ):
+               # Tool has the required attributes, create a Tool instance
+               processed_tools.append(Tool.from_langchain(tool))
+           else:
+               raise ValueError(
+                   f"Invalid tool type: {type(tool)}. "
+                   "Tool must be an instance of BaseTool or "
+                   "an object with 'name', 'func', and 'description' attributes."
+               )
+       return processed_tools
+
    @model_validator(mode="after")
    def validate_and_set_attributes(self):
        # Validate required fields
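
For illustration, the validator accepts anything that quacks like a tool. The object below is hypothetical and not part of the codebase; it only shows the duck-typed path through `Tool.from_langchain`:

```python
from crewai import Agent

def lookup_weather(city: str) -> str:
    """Return a canned weather report for a city."""
    return f"It is sunny in {city}."

class WeatherToolLike:
    # Not a BaseTool subclass; validate_tools only checks these attributes.
    name = "weather"
    description = "Look up the weather for a city."
    func = staticmethod(lookup_weather)

agent = Agent(
    role="Weather Agent",
    goal="Answer weather questions.",
    backstory="You check the weather.",
    tools=[WeatherToolLike()],  # wrapped into a Tool by validate_tools
)
```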

@@ -7,6 +7,7 @@ from rich.console import Console

from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN
from .utils import TokenManager, validate_token
+from crewai.cli.tools.main import ToolCommand

console = Console()

@@ -63,7 +64,22 @@ class AuthenticationCommand:
                validate_token(token_data["id_token"])
                expires_in = 360000  # Token expiration time in seconds
                self.token_manager.save_tokens(token_data["access_token"], expires_in)
-               console.print("\nWelcome to CrewAI+ !!", style="green")
+
+               try:
+                   ToolCommand().login()
+               except Exception:
+                   console.print(
+                       "\n[bold yellow]Warning:[/bold yellow] Authentication with the Tool Repository failed.",
+                       style="yellow",
+                   )
+                   console.print(
+                       "Other features will work normally, but you may experience limitations "
+                       "with downloading and publishing tools."
+                       "\nRun [bold]crewai login[/bold] to try logging in again.\n",
+                       style="yellow",
+                   )
+
+               console.print("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")
                return

            if token_data["error"] not in ("authorization_pending", "slow_down"):

src/crewai/cli/authentication/token.py (new file, 10 lines)

@@ -0,0 +1,10 @@
```python
from .utils import TokenManager


def get_auth_token() -> str:
    """Get the authentication token."""
    access_token = TokenManager().get_token()
    if not access_token:
        raise Exception()
    return access_token
```

@@ -2,7 +2,7 @@ import requests
from requests.exceptions import JSONDecodeError
from rich.console import Console

from crewai.cli.plus_api import PlusAPI
-from crewai.cli.utils import get_auth_token
+from crewai.cli.authentication.token import get_auth_token
from crewai.telemetry.telemetry import Telemetry

console = Console()

@@ -1,7 +1,7 @@
from typing import Optional
import requests
from os import getenv
-from crewai.cli.utils import get_crewai_version
+from crewai.cli.version import get_crewai_version
from urllib.parse import urljoin

@@ -3,7 +3,8 @@
import click
from packaging import version

-from crewai.cli.utils import get_crewai_version, read_toml
+from crewai.cli.utils import read_toml
+from crewai.cli.version import get_crewai_version


def run_crew() -> None:

@@ -1,4 +1,3 @@
-import importlib.metadata
import os
import shutil
import sys

@@ -9,7 +8,6 @@ import click
import tomli
from rich.console import Console

from crewai.cli.authentication.utils import TokenManager
from crewai.cli.constants import ENV_VARS

if sys.version_info >= (3, 11):

@@ -137,11 +135,6 @@ def _get_nested_value(data: Dict[str, Any], keys: List[str]) -> Any:
    return reduce(dict.__getitem__, keys, data)


-def get_crewai_version() -> str:
-   """Get the version number of CrewAI running the CLI"""
-   return importlib.metadata.version("crewai")
-
-
def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
    """Fetch the environment variables from a .env file and return them as a dictionary."""
    try:

@@ -166,14 +159,6 @@ def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
        return {}


-def get_auth_token() -> str:
-   """Get the authentication token."""
-   access_token = TokenManager().get_token()
-   if not access_token:
-       raise Exception()
-   return access_token
-
-
def tree_copy(source, destination):
    """Copies the entire directory structure from the source to the destination."""
    for item in os.listdir(source):

src/crewai/cli/version.py (new file, 6 lines)

@@ -0,0 +1,6 @@
```python
import importlib.metadata


def get_crewai_version() -> str:
    """Get the version number of CrewAI running the CLI"""
    return importlib.metadata.version("crewai")
```

@@ -28,6 +28,7 @@ from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.knowledge.knowledge import Knowledge
+from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.memory.user.user_memory import UserMemory
from crewai.process import Process
from crewai.task import Task

@@ -202,10 +203,13 @@ class Crew(BaseModel):
        default=[],
        description="List of execution logs for tasks",
    )
-   knowledge: Optional[Dict[str, Any]] = Field(
-       default=None, description="Knowledge for the crew. Add knowledge sources to the knowledge object."
+   knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
+       default=None,
+       description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
    )
+   _knowledge: Optional[Knowledge] = PrivateAttr(
+       default=None,
+   )

    @field_validator("id", mode="before")
    @classmethod

@@ -282,11 +286,22 @@ class Crew(BaseModel):

    @model_validator(mode="after")
    def create_crew_knowledge(self) -> "Crew":
-       if self.knowledge:
-           try:
-               self.knowledge = Knowledge(**self.knowledge) if isinstance(self.knowledge, dict) else self.knowledge
-           except (TypeError, ValueError) as e:
-               raise ValueError(f"Invalid knowledge configuration: {str(e)}")
+       """Create the knowledge for the crew."""
+       if self.knowledge_sources:
+           try:
+               if isinstance(self.knowledge_sources, list) and all(
+                   isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
+               ):
+                   self._knowledge = Knowledge(
+                       sources=self.knowledge_sources,
+                       embedder_config=self.embedder,
+                       collection_name="crew",
+                   )
+
+           except Exception as e:
+               self._logger.log(
+                   "warning", f"Failed to init knowledge: {e}", color="yellow"
+               )
        return self

    @model_validator(mode="after")

@@ -942,6 +957,11 @@ class Crew(BaseModel):
        result = self._execute_tasks(self.tasks, start_index, True)
        return result

+   def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
+       if self._knowledge:
+           return self._knowledge.query(query)
+       return None
+
    def copy(self):
        """Create a deep copy of the Crew."""

@@ -5,8 +5,8 @@ from pydantic import BaseModel, ConfigDict, Field

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
-from crewai.utilities.logger import Logger
+from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD

os.environ["TOKENIZERS_PARALLELISM"] = "false"  # removes logging from fastembed

@@ -18,24 +18,33 @@ class Knowledge(BaseModel):
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    embedder_config: Optional[Dict[str, Any]] = None
    """

    sources: List[BaseKnowledgeSource] = Field(default_factory=list)
    model_config = ConfigDict(arbitrary_types_allowed=True)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    embedder_config: Optional[Dict[str, Any]] = None
+   collection_name: Optional[str] = None

-   def __init__(self, embedder_config: Optional[Dict[str, Any]] = None, **data):
+   def __init__(
+       self,
+       collection_name: str,
+       sources: List[BaseKnowledgeSource],
+       embedder_config: Optional[Dict[str, Any]] = None,
+       storage: Optional[KnowledgeStorage] = None,
+       **data,
+   ):
        super().__init__(**data)
-       self.storage = KnowledgeStorage(embedder_config=embedder_config or None)
-
-       try:
-           for source in self.sources:
-               source.add()
-       except Exception as e:
-           Logger(verbose=True).log(
-               "warning",
-               f"Failed to init knowledge: {e}",
-               color="yellow",
-           )
+       if storage:
+           self.storage = storage
+       else:
+           self.storage = KnowledgeStorage(
+               embedder_config=embedder_config, collection_name=collection_name
+           )
+       self.sources = sources
+       self.storage.initialize_knowledge_storage()
+       for source in sources:
+           source.storage = self.storage
+           source.add()

    def query(
        self, query: List[str], limit: int = 3, preference: Optional[str] = None

@@ -52,3 +61,8 @@ class Knowledge(BaseModel):
            score_threshold=DEFAULT_SCORE_THRESHOLD,
        )
        return results
+
+   def _add_sources(self):
+       for source in self.sources:
+           source.storage = self.storage
+           source.add()
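
A rough sketch of how the reworked constructor is driven. This mirrors what `Agent._set_knowledge` and `Crew.create_crew_knowledge` do above; it assumes an embedding provider is configured (for example via `OPENAI_API_KEY`), and the collection name is arbitrary:

```python
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(
    content="Brandon's favorite color is blue and he likes Mexican food.",
    metadata={"preference": "personal"},
)

# The constructor now wires up storage, initializes the Chroma collection,
# and ingests every source up front.
knowledge = Knowledge(collection_name="demo", sources=[source])

# query() takes a list of query strings and returns scored snippets
# whose "context" field holds the matching text.
results = knowledge.query(["What is Brandon's favorite color?"])
for result in results:
    print(result["context"])
```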

@@ -1,5 +1,5 @@
from abc import ABC, abstractmethod
-from typing import List, Dict, Any
+from typing import List, Dict, Any, Optional

import numpy as np
from pydantic import BaseModel, ConfigDict, Field

@@ -18,6 +18,7 @@ class BaseKnowledgeSource(BaseModel, ABC):
    model_config = ConfigDict(arbitrary_types_allowed=True)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    metadata: Dict[str, Any] = Field(default_factory=dict)
+   collection_name: Optional[str] = Field(default=None)

    @abstractmethod
    def load_content(self) -> Dict[Any, str]:

@@ -1,4 +1,4 @@
-from typing import List
+from typing import List, Optional

from pydantic import Field

@@ -9,6 +9,7 @@ class StringKnowledgeSource(BaseKnowledgeSource):
    """A knowledge source that stores and queries plain text content using embeddings."""

    content: str = Field(...)
+   collection_name: Optional[str] = Field(default=None)

    def model_post_init(self, _):
        """Post-initialization method to validate content."""

@@ -3,12 +3,16 @@ import io
import logging
import chromadb
+import os

+import chromadb.errors
from crewai.utilities.paths import db_storage_path
-from typing import Optional, List
-from typing import Dict, Any
+from typing import Optional, List, Dict, Any, Union
from crewai.utilities import EmbeddingConfigurator
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
import hashlib
+from chromadb.config import Settings
+from chromadb.api import ClientAPI
+from crewai.utilities.logger import Logger


@contextlib.contextmanager

@@ -35,9 +39,16 @@ class KnowledgeStorage(BaseKnowledgeStorage):
    """

    collection: Optional[chromadb.Collection] = None
+   collection_name: Optional[str] = "knowledge"
+   app: Optional[ClientAPI] = None

-   def __init__(self, embedder_config: Optional[Dict[str, Any]] = None):
-       self._initialize_app(embedder_config or {})
+   def __init__(
+       self,
+       embedder_config: Optional[Dict[str, Any]] = None,
+       collection_name: Optional[str] = None,
+   ):
+       self.collection_name = collection_name
+       self._set_embedder_config(embedder_config)

    def search(
        self,

@@ -67,43 +78,75 @@ class KnowledgeStorage(BaseKnowledgeStorage):
        else:
            raise Exception("Collection not initialized")

-   def _initialize_app(self, embedder_config: Optional[Dict[str, Any]] = None):
-       import chromadb
-       from chromadb.config import Settings
-
-       self._set_embedder_config(embedder_config)
-
+   def initialize_knowledge_storage(self):
+       base_path = os.path.join(db_storage_path(), "knowledge")
        chroma_client = chromadb.PersistentClient(
-           path=f"{db_storage_path()}/knowledge",
+           path=base_path,
            settings=Settings(allow_reset=True),
        )

        self.app = chroma_client

        try:
-           self.collection = self.app.get_or_create_collection(name="knowledge")
+           collection_name = (
+               f"knowledge_{self.collection_name}"
+               if self.collection_name
+               else "knowledge"
+           )
+           if self.app:
+               self.collection = self.app.get_or_create_collection(
+                   name=collection_name, embedding_function=self.embedder_config
+               )
+           else:
+               raise Exception("Vector Database Client not initialized")
        except Exception:
            raise Exception("Failed to create or get collection")

    def reset(self):
        if self.app:
            self.app.reset()
+       else:
+           base_path = os.path.join(db_storage_path(), "knowledge")
+           self.app = chromadb.PersistentClient(
+               path=base_path,
+               settings=Settings(allow_reset=True),
+           )
+           self.app.reset()

    def save(
-       self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
+       self,
+       documents: List[str],
+       metadata: Union[Dict[str, Any], List[Dict[str, Any]]],
    ):
        if self.collection:
-           metadatas = [metadata] if isinstance(metadata, dict) else metadata
-
-           ids = [
-               hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
-           ]
-
-           self.collection.upsert(
-               documents=documents,
-               metadatas=metadatas,
-               ids=ids,
-           )
+           try:
+               metadatas = [metadata] if isinstance(metadata, dict) else metadata
+
+               ids = [
+                   hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
+               ]
+
+               self.collection.upsert(
+                   documents=documents,
+                   metadatas=metadatas,
+                   ids=ids,
+               )
+           except chromadb.errors.InvalidDimensionException as e:
+               Logger(verbose=True).log(
+                   "error",
+                   "Embedding dimension mismatch. This usually happens when mixing different embedding models. Try resetting the collection using `crewai reset-memories -a`",
+                   "red",
+               )
+               raise ValueError(
+                   "Embedding dimension mismatch. Make sure you're using the same embedding model "
+                   "across all operations with this collection."
+                   "Try resetting the collection using `crewai reset-memories -a`"
+               ) from e
+           except Exception as e:
+               Logger(verbose=True).log(
+                   "error", f"Failed to upsert documents: {e}", "red"
+               )
+               raise
        else:
            raise Exception("Collection not initialized")
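
A small sketch of the storage lifecycle after this refactor. Collections are now namespaced as `knowledge_<name>` (falling back to a shared `knowledge` collection), and database setup is deferred to `initialize_knowledge_storage()`. The `search` call is shown with only a query list because its remaining parameters are not visible in this diff, and an embedding provider is assumed to be configured.

```python
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage

# Construction no longer touches the database; it only records configuration.
storage = KnowledgeStorage(collection_name="Information_Agent")

# The Chroma client and the "knowledge_Information_Agent" collection are
# created by this explicit call (Knowledge.__init__ does it for you).
storage.initialize_knowledge_storage()

storage.save(
    documents=["Brandon's favorite color is blue."],
    metadata={"preference": "personal"},
)
results = storage.search(["favorite color"])
```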

src/crewai/knowledge/utils/knowledge_utils.py (new file, 12 lines)

@@ -0,0 +1,12 @@
```python
from typing import Any, Dict, List


def extract_knowledge_context(knowledge_snippets: List[Dict[str, Any]]) -> str:
    """Extract knowledge from the task prompt."""
    valid_snippets = [
        result["context"]
        for result in knowledge_snippets
        if result and result.get("context")
    ]
    snippet = "\n".join(valid_snippets)
    return f"Additional Information: {snippet}" if valid_snippets else ""
```
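
The helper simply joins the `context` fields of the query results and prefixes them. A minimal usage sketch with hand-built snippets:

```python
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context

snippets = [
    {"context": "Brandon's favorite color is blue.", "score": 0.12},
    {"context": "Brandon likes Mexican food.", "score": 0.18},
    {},  # entries without a "context" key are skipped
]

print(extract_knowledge_context(snippets))
# Additional Information: Brandon's favorite color is blue.
# Brandon likes Mexican food.
```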

@@ -279,9 +279,7 @@ class Task(BaseModel):
        content = (
            json_output
            if json_output
-           else pydantic_output.model_dump_json()
-           if pydantic_output
-           else result
+           else pydantic_output.model_dump_json() if pydantic_output else result
        )
        self._save_file(content)

@@ -1,10 +1,12 @@
from abc import ABC, abstractmethod
+from inspect import signature
from typing import Any, Callable, Type, get_args, get_origin

-from langchain_core.tools import StructuredTool
-from pydantic import BaseModel, ConfigDict, Field, validator
+from pydantic import BaseModel, ConfigDict, Field, create_model, validator
from pydantic import BaseModel as PydanticBaseModel

+from crewai.tools.structured_tool import CrewStructuredTool
+

class BaseTool(BaseModel, ABC):
    class _ArgsSchemaPlaceholder(PydanticBaseModel):

@@ -63,9 +65,10 @@ class BaseTool(BaseModel, ABC):
    ) -> Any:
        """Here goes the actual implementation of the tool."""

-   def to_langchain(self) -> StructuredTool:
+   def to_structured_tool(self) -> CrewStructuredTool:
+       """Convert this tool to a CrewStructuredTool instance."""
        self._set_args_schema()
-       return StructuredTool(
+       return CrewStructuredTool(
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,

@@ -73,17 +76,47 @@ class BaseTool(BaseModel, ABC):
        )

    @classmethod
-   def from_langchain(cls, tool: StructuredTool) -> "BaseTool":
-       if cls == Tool:
-           if tool.func is None:
-               raise ValueError("StructuredTool must have a callable 'func'")
-           return Tool(
-               name=tool.name,
-               description=tool.description,
-               args_schema=tool.args_schema,
-               func=tool.func,
-           )
-       raise NotImplementedError(f"from_langchain not implemented for {cls.__name__}")
+   def from_langchain(cls, tool: Any) -> "BaseTool":
+       """Create a Tool instance from a CrewStructuredTool.
+
+       This method takes a CrewStructuredTool object and converts it into a
+       Tool instance. It ensures that the provided tool has a callable 'func'
+       attribute and infers the argument schema if not explicitly provided.
+       """
+       if not hasattr(tool, "func") or not callable(tool.func):
+           raise ValueError("The provided tool must have a callable 'func' attribute.")
+
+       args_schema = getattr(tool, "args_schema", None)
+
+       if args_schema is None:
+           # Infer args_schema from the function signature if not provided
+           func_signature = signature(tool.func)
+           annotations = func_signature.parameters
+           args_fields = {}
+           for name, param in annotations.items():
+               if name != "self":
+                   param_annotation = (
+                       param.annotation if param.annotation != param.empty else Any
+                   )
+                   field_info = Field(
+                       default=...,
+                       description="",
+                   )
+                   args_fields[name] = (param_annotation, field_info)
+           if args_fields:
+               args_schema = create_model(f"{tool.name}Input", **args_fields)
+           else:
+               # Create a default schema with no fields if no parameters are found
+               args_schema = create_model(
+                   f"{tool.name}Input", __base__=PydanticBaseModel
+               )
+
+       return cls(
+           name=getattr(tool, "name", "Unnamed Tool"),
+           description=getattr(tool, "description", ""),
+           func=tool.func,
+           args_schema=args_schema,
+       )

    def _set_args_schema(self):
        if self.args_schema is None:

@@ -134,17 +167,70 @@ class BaseTool(BaseModel, ABC):


class Tool(BaseTool):
    func: Callable
+   """The function that will be executed when the tool is called."""

    def _run(self, *args: Any, **kwargs: Any) -> Any:
        return self.func(*args, **kwargs)

+   @classmethod
+   def from_langchain(cls, tool: Any) -> "Tool":
+       """Create a Tool instance from a CrewStructuredTool.
+
+       This method takes a CrewStructuredTool object and converts it into a
+       Tool instance. It ensures that the provided tool has a callable 'func'
+       attribute and infers the argument schema if not explicitly provided.
+
+       Args:
+           tool (Any): The CrewStructuredTool object to be converted.
+
+       Returns:
+           Tool: A new Tool instance created from the provided CrewStructuredTool.
+
+       Raises:
+           ValueError: If the provided tool does not have a callable 'func' attribute.
+       """
+       if not hasattr(tool, "func") or not callable(tool.func):
+           raise ValueError("The provided tool must have a callable 'func' attribute.")
+
+       args_schema = getattr(tool, "args_schema", None)
+
+       if args_schema is None:
+           # Infer args_schema from the function signature if not provided
+           func_signature = signature(tool.func)
+           annotations = func_signature.parameters
+           args_fields = {}
+           for name, param in annotations.items():
+               if name != "self":
+                   param_annotation = (
+                       param.annotation if param.annotation != param.empty else Any
+                   )
+                   field_info = Field(
+                       default=...,
+                       description="",
+                   )
+                   args_fields[name] = (param_annotation, field_info)
+           if args_fields:
+               args_schema = create_model(f"{tool.name}Input", **args_fields)
+           else:
+               # Create a default schema with no fields if no parameters are found
+               args_schema = create_model(
+                   f"{tool.name}Input", __base__=PydanticBaseModel
+               )
+
+       return cls(
+           name=getattr(tool, "name", "Unnamed Tool"),
+           description=getattr(tool, "description", ""),
+           func=tool.func,
+           args_schema=args_schema,
+       )
+

def to_langchain(
-   tools: list[BaseTool | StructuredTool],
-) -> list[StructuredTool]:
-   return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools]
+   tools: list[BaseTool | CrewStructuredTool],
+) -> list[CrewStructuredTool]:
+   return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]


def tool(*args):
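
A hedged sketch of the new conversion path: a plain function wrapped as a `CrewStructuredTool` now round-trips into a `Tool` without touching LangChain. The names below are illustrative.

```python
from crewai.tools.base_tool import Tool
from crewai.tools.structured_tool import CrewStructuredTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

# Build the structured tool first, then lift it into a Tool;
# Tool.from_langchain() reuses the structured tool's inferred args schema.
structured = CrewStructuredTool.from_function(multiply)
as_tool = Tool.from_langchain(structured)

print(structured.invoke({"a": 6, "b": 7}))  # 42
print(as_tool.name, as_tool.description)    # multiply Multiply two integers.
```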

@@ -1,6 +1,7 @@
from pydantic import BaseModel, Field

from crewai.agents.cache import CacheHandler
+from crewai.tools.structured_tool import CrewStructuredTool


class CacheTools(BaseModel):

@@ -13,9 +14,7 @@ class CacheTools(BaseModel):
    )

    def tool(self):
-       from langchain.tools import StructuredTool
-
-       return StructuredTool.from_function(
+       return CrewStructuredTool.from_function(
            func=self.hit_cache,
            name=self.name,
            description="Reads directly from the cache",

src/crewai/tools/structured_tool.py (new file, 242 lines)

@@ -0,0 +1,242 @@
```python
from __future__ import annotations

import inspect
import textwrap
from typing import Any, Callable, Optional, Union, get_type_hints

from pydantic import BaseModel, Field, create_model

from crewai.utilities.logger import Logger


class CrewStructuredTool:
    """A structured tool that can operate on any number of inputs.

    This tool intends to replace StructuredTool with a custom implementation
    that integrates better with CrewAI's ecosystem.
    """

    def __init__(
        self,
        name: str,
        description: str,
        args_schema: type[BaseModel],
        func: Callable[..., Any],
    ) -> None:
        """Initialize the structured tool.

        Args:
            name: The name of the tool
            description: A description of what the tool does
            args_schema: The pydantic model for the tool's arguments
            func: The function to run when the tool is called
        """
        self.name = name
        self.description = description
        self.args_schema = args_schema
        self.func = func
        self._logger = Logger()

        # Validate the function signature matches the schema
        self._validate_function_signature()

    @classmethod
    def from_function(
        cls,
        func: Callable,
        name: Optional[str] = None,
        description: Optional[str] = None,
        return_direct: bool = False,
        args_schema: Optional[type[BaseModel]] = None,
        infer_schema: bool = True,
        **kwargs: Any,
    ) -> CrewStructuredTool:
        """Create a tool from a function.

        Args:
            func: The function to create a tool from
            name: The name of the tool. Defaults to the function name
            description: The description of the tool. Defaults to the function docstring
            return_direct: Whether to return the output directly
            args_schema: Optional schema for the function arguments
            infer_schema: Whether to infer the schema from the function signature
            **kwargs: Additional arguments to pass to the tool

        Returns:
            A CrewStructuredTool instance

        Example:
            >>> def add(a: int, b: int) -> int:
            ...     '''Add two numbers'''
            ...     return a + b
            >>> tool = CrewStructuredTool.from_function(add)
        """
        name = name or func.__name__
        description = description or inspect.getdoc(func)

        if description is None:
            raise ValueError(
                f"Function {name} must have a docstring if description not provided."
            )

        # Clean up the description
        description = textwrap.dedent(description).strip()

        if args_schema is not None:
            # Use provided schema
            schema = args_schema
        elif infer_schema:
            # Infer schema from function signature
            schema = cls._create_schema_from_function(name, func)
        else:
            raise ValueError(
                "Either args_schema must be provided or infer_schema must be True."
            )

        return cls(
            name=name,
            description=description,
            args_schema=schema,
            func=func,
        )

    @staticmethod
    def _create_schema_from_function(
        name: str,
        func: Callable,
    ) -> type[BaseModel]:
        """Create a Pydantic schema from a function's signature.

        Args:
            name: The name to use for the schema
            func: The function to create a schema from

        Returns:
            A Pydantic model class
        """
        # Get function signature
        sig = inspect.signature(func)

        # Get type hints
        type_hints = get_type_hints(func)

        # Create field definitions
        fields = {}
        for param_name, param in sig.parameters.items():
            # Skip self/cls for methods
            if param_name in ("self", "cls"):
                continue

            # Get type annotation
            annotation = type_hints.get(param_name, Any)

            # Get default value
            default = ... if param.default == param.empty else param.default

            # Add field
            fields[param_name] = (annotation, Field(default=default))

        # Create model
        schema_name = f"{name.title()}Schema"
        return create_model(schema_name, **fields)

    def _validate_function_signature(self) -> None:
        """Validate that the function signature matches the args schema."""
        sig = inspect.signature(self.func)
        schema_fields = self.args_schema.model_fields

        # Check required parameters
        for param_name, param in sig.parameters.items():
            # Skip self/cls for methods
            if param_name in ("self", "cls"):
                continue

            # Skip **kwargs parameters
            if param.kind in (
                inspect.Parameter.VAR_KEYWORD,
                inspect.Parameter.VAR_POSITIONAL,
            ):
                continue

            # Only validate required parameters without defaults
            if param.default == inspect.Parameter.empty:
                if param_name not in schema_fields:
                    raise ValueError(
                        f"Required function parameter '{param_name}' "
                        f"not found in args_schema"
                    )

    def _parse_args(self, raw_args: Union[str, dict]) -> dict:
        """Parse and validate the input arguments against the schema.

        Args:
            raw_args: The raw arguments to parse, either as a string or dict

        Returns:
            The validated arguments as a dictionary
        """
        if isinstance(raw_args, str):
            try:
                import json

                raw_args = json.loads(raw_args)
            except json.JSONDecodeError as e:
                raise ValueError(f"Failed to parse arguments as JSON: {e}")

        try:
            validated_args = self.args_schema.model_validate(raw_args)
            return validated_args.model_dump()
        except Exception as e:
            raise ValueError(f"Arguments validation failed: {e}")

    async def ainvoke(
        self,
        input: Union[str, dict],
        config: Optional[dict] = None,
        **kwargs: Any,
    ) -> Any:
        """Asynchronously invoke the tool.

        Args:
            input: The input arguments
            config: Optional configuration
            **kwargs: Additional keyword arguments

        Returns:
            The result of the tool execution
        """
        parsed_args = self._parse_args(input)

        if inspect.iscoroutinefunction(self.func):
            return await self.func(**parsed_args, **kwargs)
        else:
            # Run sync functions in a thread pool
            import asyncio

            return await asyncio.get_event_loop().run_in_executor(
                None, lambda: self.func(**parsed_args, **kwargs)
            )

    def _run(self, *args, **kwargs) -> Any:
        """Legacy method for compatibility."""
        # Convert args/kwargs to our expected format
        input_dict = dict(zip(self.args_schema.model_fields.keys(), args))
        input_dict.update(kwargs)
        return self.invoke(input_dict)

    def invoke(
        self, input: Union[str, dict], config: Optional[dict] = None, **kwargs: Any
    ) -> Any:
        """Main method for tool execution."""
        parsed_args = self._parse_args(input)
        return self.func(**parsed_args, **kwargs)

    @property
    def args(self) -> dict:
        """Get the tool's input arguments schema."""
        return self.args_schema.model_json_schema()["properties"]

    def __repr__(self) -> str:
        return (
            f"CrewStructuredTool(name='{self.name}', description='{self.description}')"
        )
```
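
A short usage sketch of the new class, assuming nothing beyond what the file above defines: the schema is inferred from the signature, JSON string inputs are parsed before validation, and synchronous functions are pushed to a thread pool by `ainvoke`.

```python
import asyncio

from crewai.tools.structured_tool import CrewStructuredTool

def add(a: int, b: int = 0) -> int:
    """Add two numbers"""
    return a + b

tool = CrewStructuredTool.from_function(add)

print(tool.args)                        # properties of the inferred "AddSchema"
print(tool.invoke('{"a": 2, "b": 3}'))  # JSON string parsed and validated -> 5
print(asyncio.run(tool.ainvoke({"a": 40, "b": 2})))  # sync func in a thread pool -> 42
```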

@@ -11,7 +11,7 @@
    "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
    "no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-   "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
+   "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
    "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",

@@ -21,7 +21,8 @@
    "summarizer_system_message": "You are a helpful assistant that summarizes text.",
    "sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
    "summary": "This is a summary of our conversation so far:\n{merged_summary}",
-   "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared."
+   "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
+   "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
  },
  "errors": {
    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",

@@ -1,6 +1,6 @@
import json
import re
-from typing import Any, Optional, Type, Union
+from typing import Any, Optional, Type, Union, get_args, get_origin

from pydantic import BaseModel, ValidationError

@@ -214,3 +214,38 @@ def create_converter(
        raise Exception("No output converter found or set.")

    return converter
+
+
+def generate_model_description(model: Type[BaseModel]) -> str:
+   """
+   Generate a string description of a Pydantic model's fields and their types.
+
+   This function takes a Pydantic model class and returns a string that describes
+   the model's fields and their respective types. The description includes handling
+   of complex types such as `Optional`, `List`, and `Dict`, as well as nested Pydantic
+   models.
+   """
+
+   def describe_field(field_type):
+       origin = get_origin(field_type)
+       args = get_args(field_type)
+
+       if origin is Union and type(None) in args:
+           non_none_args = [arg for arg in args if arg is not type(None)]
+           return f"Optional[{describe_field(non_none_args[0])}]"
+       elif origin is list:
+           return f"List[{describe_field(args[0])}]"
+       elif origin is dict:
+           key_type = describe_field(args[0])
+           value_type = describe_field(args[1])
+           return f"Dict[{key_type}, {value_type}]"
+       elif isinstance(field_type, type) and issubclass(field_type, BaseModel):
+           return generate_model_description(field_type)
+       else:
+           return field_type.__name__
+
+   fields = model.__annotations__
+   field_descriptions = [
+       f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
+   ]
+   return "{\n  " + ",\n  ".join(field_descriptions) + "\n}"
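
To make the generated description concrete, a small sketch with an illustrative nested model (the field names are not from the codebase):

```python
from typing import List, Optional

from pydantic import BaseModel

from crewai.utilities.converter import generate_model_description

class Skill(BaseModel):
    name: str
    years: int

class Profile(BaseModel):
    name: str
    age: Optional[int]
    skills: List[Skill]

print(generate_model_description(Profile))
# {
#   "name": str,
#   "age": Optional[int],
#   "skills": List[{
#   "name": str,
#   "years": int
# }]
# }
```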

@@ -3,20 +3,19 @@
import os
from unittest import mock
from unittest.mock import patch

import pytest

from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserException
+from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.tools import tool
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.tools.tool_usage_events import ToolUsageFinished
from crewai.utilities import RPMController
-from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.utilities.events import Emitter

@@ -1584,21 +1583,22 @@ def test_agent_with_knowledge_sources():
    string_source = StringKnowledgeSource(
        content=content, metadata={"preference": "personal"}
    )

-   with patch('crewai.knowledge.storage.knowledge_storage.KnowledgeStorage') as MockKnowledge:
+   with patch(
+       "crewai.knowledge.storage.knowledge_storage.KnowledgeStorage"
+   ) as MockKnowledge:
        mock_knowledge_instance = MockKnowledge.return_value
        mock_knowledge_instance.sources = [string_source]
-       mock_knowledge_instance.query.return_value = [{
-           "content": content,
-           "metadata": {"preference": "personal"}
-       }]
+       mock_knowledge_instance.query.return_value = [
+           {"content": content, "metadata": {"preference": "personal"}}
+       ]

        agent = Agent(
            role="Information Agent",
            goal="Provide information based on knowledge sources",
            backstory="You have access to specific knowledge sources.",
            llm=LLM(model="gpt-4o-mini"),
            knowledge_sources=[string_source],
        )

        # Create a task that requires the agent to use the knowledge

@@ -1613,4 +1613,3 @@ def test_agent_with_knowledge_sources():

    # Assert that the agent provides the correct information
    assert "blue" in result.raw.lower()

@@ -1,4 +1,415 @@
interactions:
- request:
    body: '{"input": ["Brandon''s favorite color is blue and he likes Mexican food."],
      "model": "text-embedding-3-small", "encoding_format": "base64"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '138'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.9
    method: POST
    uri: https://api.openai.com/v1/embeddings
  response:
    body:
      string: !!binary |
H4sIAAAAAAAAA1R6SxOyurrmfP+KVWtKn5KLkGTNuIuABAFRu7q6QJGLIAKSQE6d/96l367T3RMH
|
||||
EEpI8j63N//5r7/++rvPm+L2+fufv/5u6+nz9//4Xrtnn+zvf/76n//666+//vrP3+//N7Lo8uJ+
|
||||
r1/lb/jvZv26F8vf//zF//eV/zvon7/+jq6vjKh9O4J16w0pDD6Sje2RqsYS44hDcWl9cPKQzIYP
|
||||
H8KK5NDZkVu+3eVi0B0ntNmvzxlIl3fM8DMLoM+bIy5Ux2e0iE4p2g/aRIr0JXu0ntIb1N1dFLDx
|
||||
vowUa0+I7jsjmhXu9AKr4lIODLV4wOaJfUaWM6xD7yWLWAN5O64qLkxZGDl1trNOzJmJMxdZUpbh
|
||||
AEQuW8BVVtEHpGdsyZmZi5p5NBE+0gc+HuXJmyk6l1ADg42tKQ9y0Xw/fPh63+7EZnXH2OF8HODb
|
||||
G0ZiuboPWMlffRCXuw/WsMAD5omxiNpX0c7bfPUNcYtoClEq74JJuHfjcnknNjrPtoEjd5FZ9zjC
|
||||
AVrdxyOH6fUEzJUVBxl740z2mWwCQRrXHp128E5uN6UxlmI1IcJucCR76ebE0t6UAiW/imdsTI9P
|
||||
vJbUKpCY+SI2z0YdLwN0dODkyCe5qUYetTe0R/PoaiSXjz5Y5wSY0OLiS7A62gxWWYlUsOmVDdkJ
|
||||
6p3xb28p0et2+BDtVdFmQU5JZe58UcmDNi0j8Bqm6NpxyvxeMx8ICbhPf74n/+y5ZlHOvgnfA7MD
|
||||
fnPaeWTIqg5VoZrOfaJazaeOjhdErOOeRNNUNnT/5ETITnxGwvfrFbOukCMYDeEb37I7x0ikyTMQ
|
||||
RqiS073hgQTTJoNR7gREraukkYKqjZDw4iNi4EUASxTJIfi+Dy4CIcqlzUd6wpFuP/g2hed8ZUvl
|
||||
ImSmhwBpWwDmy92BSnD3pBmMYW7wz4Wm8JiNJ2ycGziyOAo7NFwNOG/GNI1Fa/IdMPZjSCzmXWM2
|
||||
XbcBykOSEi+vroy5Lc+hoTpe8f1xVwDdP0URzdltxfErujMeKFoK92QKZkELhpH2e/MJJXg1sc1h
|
||||
f+TpCnz45CoZBwduGNlukp/w8fzYRMWeGovT5ZiBA2fcsNayyiA1Vbaw3h+35FJ2QSPa7l0FjYNR
|
||||
sGZlbgiZeBsgV8AD0ept7bHdtHTQp71P8kKwgOT1bwWcac8RCzgxY5zjmOhb/9hF8Pv+99BFvirW
|
||||
RBdO2PjdV+hV90jxfb/nhswZbPHBwdnc2Q1/3SYOOk8VJnGmqkB4j28XlIZQ4WKeNLboB0uBNr+M
|
||||
OHfNvUdgOmaKogm3AKJM86jkeyqULOOA3Z23b4TbyiWwGqcjuWkGG1cbjiqc9W2LI77Y5TxnNAPS
|
||||
l7s7z9v9yhbbEQNU05GSfVPHMZPPCUXdLsJEm2bVkPrdncJPs7pEl7n7uF5TYUacPS3kxuucQa/b
|
||||
mwsWyRzJ+WSWDb0Ogw85zuxJEQhrvJJHMMHU3tbkIgbM+K6/D6mJg5mpOg/WBMISnt8mJSH2ypjd
|
||||
02UFV32PsG0H9jiFXNDBIg522Iw0G/C6qWaoql9hIH1eptEb1HPh+Z7xZDcXHhO28FgDf7M7EzNZ
|
||||
ImPZT/0WJjCqiHF/xYC/8+YNlqU/4XQKckNQtixAerZW+EB86C2+gRzltegj1pygNtjKmRBO5/aE
|
||||
PXYQxvVxhD2Us8dz5sxYbZYeyheY93FDHEWVwezpRIduv1kCoGxgTkMueMJtXj3m6n6/jsO4W2uU
|
||||
ctGVaNaCWB89/At8noUj9l7v3qDhA1HYRGqAo3F4MjLurwlsnAOaFWojbzrLVgkGQ2/w93kw303e
|
||||
hJNzU7D33Z/irv/w4Mxbd2LVbZhTqrY+Wo+Yke/8MVaVNx0KZ8/AarA3mYSkt/vbXwSzmAJWzRaE
|
||||
WwNFWCPxOWYv5NpQKL6rg5Gf8wWdHZC6Z42408M1hMYPXLhB6mfmXN8An/OkZJALQg7nryLJ+dNl
|
||||
t8IT97ZI5Pv6KMQcr8P47c8kF+yrMafQc+ByhiF5OMfKeDc1jRB0xBfRzivJ+/e1KZC5OccEb0Iv
|
||||
pp9WGeCzNTUSgavtSZ8NmeQnuRbYEyutYdd8f4PVQsbZlJoyZ9qiDGg7jArZ9a0zCicaTkgqnICc
|
||||
goM6So9ligBneYiol/t9XL1n6kDbsFOciI4EmKA6HZRFnuCgS7OYmkeVgyJ/5oK2mp1G6AJtRuKw
|
||||
tX71PFKS1BdoF5ZOsOuasXCoLQ5lYhcEm3u5HUnxuvRQ5/QBm77TNCs6diEK0kjGh8TUYvEqBDXg
|
||||
dw3B6vZRMipxVx3eNneT3PqCNst12utwpMoHu88aj3/4d9yXQtB7fuMJxlhMIM3VkNyPL+SRbmp8
|
||||
dNYzhxykvdj86lFxA/4dyOd2y97+eU2Q62UutqcAGFTYbhxwPYsP7J6hnYvv546ibjU7nGxOO4N+
|
||||
dm8RNuU9w/up2jerfBI4FBoPfqbzW2df/FhBD8twVgrJyufLduKhYT235AoC7AmCcuLQjz+9bdYa
|
||||
LCxsCJuPqmArEolBTrfJh8Ham8TFG4+x4EVrpFWCiK8xW73P8S5G6HBOeLJ7XWTjM1abJ2zKR0b2
|
||||
ERximhSxCphsNwEyT6qxXu4qhAfxgYkXPx/xcBXsEup1Qr77TfMEAEoXSafphsO+9YDkyyqFV91D
|
||||
2LNkIZ4QfVAo+/qFuKos58uhvwYgMZUB+59yMoi1D7agcW8qvlZ8wwTKZzoIFk4IxHlX5OzQGh3S
|
||||
C6xif653jeAbgou4k+QH0sbkAbmKbQIn4tXEL5idr5xdRei3vvrecPOvHtMh2tEJX6L2BiQQczfI
|
||||
nTMVa5qDY6qc2hCapeIQ/Oy8mNojuMHWSVziOV7L6O295ZFlPPRAnILce+9eOx1ujU1EDhv76JGZ
|
||||
rye0X1olWFH0Hmku1StaQHANqPCswCInDwfah5zN4nCSxlmr31uQX/kzdsikGbQhXgbBe/KJ7vt1
|
||||
Mx1K4kC3Rwv2QfoGbHoNLhA9O8deXslswcJxBenpPM/1STXZCt7aCt6P9Ups6ITGOl2OF1S7/IT3
|
||||
Q995rPkkHfhczIBkYZ81LEVvCJVr/sZ4yT7N8uNL/pio2FgbM2b7vErQsBW62WukKmaOp98gPvQU
|
||||
746UH1dasUFZjwdGsiGPGM0cZqIfflivl8eEQ38MYKSdOOLNz5fBskQM4DG2HHLcRp+G5tJA4cZO
|
||||
LHJqbhfv7VjnLaxje4+1YNfm7GFVNdS74o4xf/JHaZu8eDidX6dZfnduw8pUvaB2M2kk1E5681Gf
|
||||
7wy6Wd3OG2BJ47I9owJuaXInZ+5wMBa9oSp6JpcVWweejUvwXBX0fqWvmTOOT0CvQ+0j6c27+HLS
|
||||
K0/yzIP40+fESDgrpgXtHESseE/MC2x//gDCLz/gI4mleDAmoYaJFAfYyCRnFBLtrSrcJ78GpXKg
|
||||
+Wp6doaWMxfO8EJIzqposMH5Ck9fPdQyUeTeM+yVzY24F073/vAjRrcsAEuyxixnOxWyN3TwowCa
|
||||
x9/elEcEvlVyxX2RC+tNLEFGVBXvPq+nQfXjmiJtN814N7oPsGizoAMsqD1JoUO95bYvS5i12w35
|
||||
+iuPTw7QhbZXOsQSTAjYpgsVNNsnk1jqWQdCBLGp/Nbr8Z1fihKgwCDiM3LsGz+mPz3RMreaueOu
|
||||
ZhOMbAi+84V9HYqAGGWdQUeFJdkJ1wHQGN19+NVPJP/iA9l04RZ2gXcIVplD49xu9k/w9SfYtPQQ
|
||||
TFna6vIXD3GRXR+MlqKjwo2dWlgFd8tjcenWUFsrK+A5a82JLzsrPImHGIeusAWf0gp5REa0EKfs
|
||||
5mb96gtAmReT3Z4pBhOL4wUmWE5JCG3GPjHOOCBp3UyyL/5OSzVAcFfgQFQAHuxTxLyLfvW/fyWZ
|
||||
wdjtzsNrJPkkQN4cU2LTG8LLc49vjyHwlh1aHHR9XKYgUaW78UxkcQvuUu1h9dYfY1Y2a4LUZ9Fi
|
||||
mz7rZo4Ig2hRGndeY+5t0NO8KBCbDZlFzR+9z0/PHorDik351jA2no0QUn7eYQ/d1FFKyVaF6TLJ
|
||||
AZxCKR7icFkhON4oca/CGzBzEHV47aBCwjMfgM9eiUQUdaJL9L7ywdI+jhC+7ms1z02HPcnwPz4s
|
||||
9HI70+C1aT6H+sDBjIJpbp6fblwSrVKRATmC7Xbae8tYSR0q0DnAvrPv4tX0ggsUBWXF9u0hg9Wa
|
||||
TAfOH22aKd3L8XJIEh0pIZICGCKn+SjrC8JyXxrkV1+rkFkDsPK5+OJ9wliEmx4STRyIfrty46R7
|
||||
LxP6UuViH4OJzb0lQrCiJ8KHjb0Ys9PiGvpS42JcjVM81cFhgI/EkAg2gBzTodmY8PEk9jySPADC
|
||||
Dy/KMpjwjq8bY3GCi40sIvozSvkpXuK7qYBfXsDp+B2TXVBSdPRKBSelrMb0MjslFHAn4MAzvEa4
|
||||
TnsVivbOwXtH8xq2U8YanquywY8dbuPFCUIbaVJ9IWb9wmC98XcHXIHdY/OrFz5zZftIpU6Fo+Ql
|
||||
AfY6vFXQ9MYLY+dYeZ+V9QNMpGOA3eMW5yy9HraQls8r9rN7wWZcg4uiH98lMUu5jBmHOwr4KZ1m
|
||||
sNsPBnsbUgEv28nEafKJjekyOzVcdrFGnJTScfnyO3zHvvzFgxqML3JxoQr1dQbpdoyXhWt70KBp
|
||||
T45W3Hj0c8opPGbv0x/8FHkZi6CUVpm4MBjzKd2EK1SUdUfMNT16C3s0NxhAVGFzTReDlI2S/vT+
|
||||
zw+wZb1x9c+fBP3SPkdB2YIA6nbPcGDG5bhQ/psnHA4GSbrVaCi/0S/oi1fYkK00nhXepuCnP01j
|
||||
//nlFeoffZcZqsDWttxS2BuvNhB1JWuW/aWZ4Hs9JcRjh1Oz5kqZgTGoHeyV8z4Wu3t0g0IXF9jX
|
||||
DNasb0VWlDO/u8+0b0dGq+SiQCjt7mSX59zIfFld0V6KbsRQ9YTRQlwu6Dw1GBuHbTQu751lw6y+
|
||||
lWQfs9WYBcBT+Oa2PH5o/A18wk17gZy1R0S73+VxDOGDQqepsrn+6oXVnmUb2u0YzjLaReMY2FqG
|
||||
zlfuFFBfDg1yhdsOJnv1go8oeXvrLgNb2L5uLbmZQhZPfDanym7y6mCm/jamoXncwstHd4hGYilf
|
||||
HOuxhb551LD9nR+haqoB8Ql9zGUXg3yqQ6WDUe4G2PVFBtZ9ppeovAwFMT6VA+ijrVx08i6QnJAm
|
||||
xQQ6SwFgUjfE3B+ezQLjz6qU4HCZ0TBrsQjlsoen9PrGxo1Oxvp6zTa88cUl4EQrHyldQfDT77O0
|
||||
Z5nBnnRfwifsgyD16bahp7CyEf9gHta/+dfg6S8Vqv7BIbiKNvnin9cUWqkGMZbPvUcBE02UFcmC
|
||||
bd47NQvtevtP/Rxi3RqF7OKJysU4bIl1Uex4ZtlGh226pD89bazVRBxoAoXH3qtIYkHTlRAWw5rM
|
||||
Xf/qDXaZrjO8QbWd+5PcgzWLzxn45o/ELaQ2JpkcRWizuLfg43DzuJzRMUKb6Vxg+7K1GSlTJ/vp
|
||||
nz954h88IZlpY1OV7t4snxCEFLQWUbs4jz950tZQ8J30h7dsjtBHgd4phbP8xZ+JHk3zT97JU8yP
|
||||
7f21pD8/MG8sp2dsyN5P+F2fH54CVoRzDb3w/CYOXzbjRAtRh5rnHcjesqdx+uGnWssL9lV/ZMOi
|
||||
miWMrm1GvCsb4uGrZ5T98lKwNdQD+zydlwg3yXIk3zwlFsugSKF+TnWiAf3SfIbzCOE3P8G7+1YH
|
||||
c9HKInS9i0vuVcrlP/xTbvtBI8ZJOBj89UYj+PUHxFfsk8FIBJ6wqNN6Xr75AH3d9yuM1U1EDM/Z
|
||||
xVT8KM6PL8gjkdw/zwPOCCMc5pt6ZK3a32Cp5voffqHy1ejg8bW1/uQJs3hbO3gJb/08c7rokXog
|
||||
PTx4wzZYVdx7c4HxDFuirgTPQ51Pe7tTUUVc+uXHt0E39qUDM75Z+ARdMpL7eh9++UcAszsHxovi
|
||||
9ZALIg7/9uuHNMCFUzmfsbtUVrOOUvwEjw+/w0cIoLFOilrCMqkpwa2oGqK7zROQfzgrAPTuNqtv
|
||||
5Rxc/DAJlP7TxCy63zuF3ophfrpYNaRrrhVQvSln7BT1OV/iJnSh+zJtHGz3EZBOwXc/36sn/vkL
|
||||
+tHSDgbtNZmnKTzHC0n3AaDx6Y09x7MYH3J2By3pkuH4sI2atToaEeqekhjU1rFpvvpY/9V3MH/x
|
||||
e7ot5/q3f4iWo8abL8O6RVe54cnhtFI2teHeh21xaogZdly8Nutehffi9cD+M4bsu17/5q+oFvWG
|
||||
8qX8hF99jC097Ixl3jxUmL3PxTzPMQ9oeRw5cJiBNvOzh3KKdhqHtlMwYz2An6YDIpvhNx/8g+8M
|
||||
xFzxy2/IzpvuMZX5VwF/eYMhF02+zvVHhbW8q7H5HF/ewqvvCLatvGBNCo+M78rehFmrbIhbvydv
|
||||
dcAygFwPXewlSwCeQNmncMcIxe63/7Cc0TWEm367ITvZW0bqP60CXJfNK+D9jZf3C7JFIOCngHH+
|
||||
EgymPdUJ/fT/1+8Za6ChAaT1s8DOwbKbuRkF+qfedBrHIz10ykWRDsUGa/ONNPR11yhK/EIKWlCV
|
||||
bJ06LwHUYzlxj9qu+fBqFUEVzhoxqqsyvr/5E0qIsXzzJZwTc+B0eE0yNfjpo6+euMEXf3eJ81z0
|
||||
fNnCYwm0iOSzEBzKkemhUUNIGhwsp/Jh0G9+98tLyK493sbJGy4T3EFxCaauPRjT+3XK0M8P2unG
|
||||
yIn2VGcwLFyPMSdx8RRwEoR9KJ6+/BMz6d12Hbg7Txvb5fAZV3GbFmhz30Y4PxyqfBl7nYdD5HA/
|
||||
PjQ+vutcgLwJK/yA0c4gnnng4YGpdBZ2kdaw2k9FuDkmK/bbuY3Z4Xzt4VzPGPu3Kv2TD0Gef+6J
|
||||
ulwDY8nEWw8P55SfWz20DSkwkgKam1McCM3tYqyWur+AL37O1TtLDOb17y0cIpeb++5xZF8+N0HF
|
||||
ogfGUvEcmXfxdHjaJEnw81eLME06/OlFjZYdm++nDMKuywbsJGrbLKW/qaGbZhL++klDUA+bEKgx
|
||||
SwP+rLgerWqdh6B8ecReo8ggQQxS+Jblilgypg2t1XwG6Q2AWawbCsj4RAHgYMWTn///9ss4+O2P
|
||||
Eb35uLk4vqseLuvugvdiW46fpdFWCG/caUaq68fSLQtt9NVPxE+e+fjGxWuA3zyFHKqUi//4QXOH
|
||||
bvj+7U+txKh5ePZJO0uvi+ytKTRcCK3M+fr3eGT7/J2CP36HdXXz1acO8jsJYEcMmDdonkThKLHx
|
||||
33x5kWz1h9dkp2dlvGR9PSFtN8+BWFzuxsd5Mwi++DYjIzZzgtM+BN+8nHjf/owoS635wxtiCDEZ
|
||||
yTiqBUKoJcH6JJ3BBFINaOqjZ4CyjWoIgZqKCl66/Q+/wPLLN0svn4lKazsXrSIL5Az7KSmcJY9X
|
||||
zZNW2BX8LUD06jWrlJIUNoft49tPWvNVM682JFWfEGuoXbBUeCpgf5s6op3KjUffg5v8/CHR19YA
|
||||
i2vONXBKGwR8RXuP5cfHVzGjKrjMx21D8+uRg0pUj1jP5N744Te8aMkB7z+XJV/uQhMi0TPzWc7p
|
||||
NK5nQdpC6i15cGblLl9Rf7lA/E6mWf7ll3654aGWi7d51i7aKGbedgKHXjWIOd0PQOh76ENtbSxs
|
||||
IU3K2WpeInibmUAOdzLGc5m8n1C3B4Z12xljKtzzEP70o688dE/imhGCKjf0WXad3vj5fUCCizsT
|
||||
tA+AOF2uF/jt1+H7xj4aPGCcCWtuNfEh26ger39SH0aV12PfDmuwHprn+ut3BcJ4XZvuujcoEqP+
|
||||
TX54wbbjEf70a8B9+8Xffm0NE/8mkW9e5/HS9kC3v/zZIL0VC+HCbuiL7zNnuX38m1+E5keCv3yR
|
||||
s8DeZ6DvLzXOdt5+pGP3VCE+rg+yM469t66VHiFcKkqgfPO0n/9Gv/5JUCC1EU3PvqC2ODfYmY+X
|
||||
hixMTmDYcyrO80oGFOxYCg311QZL8ZlzlqIKomN9aDA+wtF7/+rfbt8hDrcPlYlWEQXo1z+9j+3b
|
||||
WGperSFz2oHsv/ky/X4f4GDDY5Xur9/9uVnhYyvy2AoOarOGLIr+9Ce0JoWA4WcUIIFkD+yfNk9v
|
||||
CeOhB1HHu/j266/J3rH/zR9+nOYjWKPaVYF7FVLiKRsY08joa/D371TAf/3rr7/+1++EQdffi/Z7
|
||||
MOBTLJ//+O+jAv8h/cfUZW375xjCPGVl8fc//z6B8Pd77Lv3539/+mfxmv7+5y9B/HPW4O9P/8na
|
||||
//f6v75/9V//+j8AAAD//wMAOXBqMeAgAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8e94839cd9e9967f-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 27 Nov 2024 19:27:11 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=XviX9Hjm.Uy8aR.6KFXUsi._PlZSGHz_33BG8yN1gNU-1732735631-1.0.1.1-xpDmkFSh5aO2fugj8VCyrc23NL7wf6Q8eq_yaxcwutJZAO5nSx9Eeqko_4UhxH4IQBfS8cJSaEmHnXWPD6lTJg;
|
||||
path=/; expires=Wed, 27-Nov-24 19:57:11 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=Xz2QlgphZCJYG8KTd5zZKB.lSwPBCu24Nwv2aB6FkeE-1732735631371-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- text-embedding-3-small
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '272'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '10000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '9999986'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_5cba1175a36bccbbad92e3ef21b7021d
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"input": ["What is Brandon''s favorite color? This is the expect criteria
|
||||
for your final answer: Brandon''s favorite color. you MUST return the actual
|
||||
complete content as the final answer, not a summary."], "model": "text-embedding-3-small",
|
||||
"encoding_format": "base64"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '270'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=XviX9Hjm.Uy8aR.6KFXUsi._PlZSGHz_33BG8yN1gNU-1732735631-1.0.1.1-xpDmkFSh5aO2fugj8VCyrc23NL7wf6Q8eq_yaxcwutJZAO5nSx9Eeqko_4UhxH4IQBfS8cJSaEmHnXWPD6lTJg;
|
||||
_cfuvid=Xz2QlgphZCJYG8KTd5zZKB.lSwPBCu24Nwv2aB6FkeE-1732735631371-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.52.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.52.1
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/embeddings
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA1SaW6+6Srfm799PsbJu6R05SVWtO84gIIXgATudDiCiICKHKqB29nfv6P/N7u6b
|
||||
mUwmM0YY4xm/5xn1n//666+/u7wui+nvf/76+/Ucp7//x/faLZuyv//563/+66+//vrrP38//787
|
||||
yzYvb7fnu/rd/vvj830rl7//+Yv/7yv/96Z//vrbVLoSHzk5BMtWoT1gp3eFzUA0a2ZdPBt1Tqni
|
||||
qC3bejWaQoT7x7TDxVCH9XwNkgppWjPT01tv/PlmCzGAy97HjkOVehnWOEbb0/tE3fEjAtoiL4RQ
|
||||
1Tuqe2UNFusgp8hF3ZbGcqkPM55dD3oW14QiPL6M5STtPXg4WB+qSkPoL0FezEr0oQa1QWb7pA21
|
||||
M/IfsocN8pRrmozbCH4+4wVrougOq6UdI+j2OkftUt36i58tPNr6XYcv5dgl62H7sNHVFrRQvkk6
|
||||
E98Cb0MzqS/USK92LRjvi4uKZykRpFod621tE8IYKnesXk/nemmyxUNbtkPhKX6eAH+HzYruRgBw
|
||||
FgbHgaj3voflwJ/oOWCHepWjU4WOY0OxPRJhYHIgR8g/lgcamp5ufH7ft9kUFmHxq2OST9RIObvP
|
||||
E7aEdcxXWfIyiMb9gP2evcHq3GaC0pyvaIpELl+O3auAPkkRPd2iNZ8d7xkjAB85vWp+BNg4yg20
|
||||
vGWPjZovfLLcPxFUOINQPNkfRqL4GsPTra+xbjivYV6CTIRBvS8Ia0acME+ORgRV/0JtX7LBzI1x
|
||||
gcZs2mI3TDbGx2cfDh3FbMKm9Vj9pZXkCj1926b6mzv6y7JLnyhq9xJNx7dvsNB/9uh1rR/Yv+xU
|
||||
xs9NpSNguSJNl/o08K9ENRFMlhinyf7GVo14Z+h3p56a3tXyRbtrTNh7qx9uww3JV+ZsdEiEXUCA
|
||||
p0aMvIuHCnzhaRGxVSRAQoFfoRsU55A/jxNb7a6xYXrqSnratDswVHnUI3NGEY6ndWGLz7/FraaV
|
||||
Gqnj97GeWCrayEr3Nj5kFkrmV+LaaCW2j4scbfxF2TkZmtrnTIRbaNe87FUKOrc2CpkhVMN6Y+cZ
|
||||
eh8J0b3+dH2RS6EKT+93RIPlMuak1PIWbk9li7WGbQEJi8cIz/F7wf505BhFTqxCFFZHqpouNoSb
|
||||
karg6Zs2Dje2b/A21J8w0wijxgm+jYVElxaas/j41fPAO14fw+Z5GGis9m1OIw25UEW8hWOrKwb2
|
||||
wdsQJe/5QB3b1WvpKUc61E7BQDOy8Wu2Owwi/NYTjobUSvi0qUbEKVNKo41a1Xy0eY1wubclNvNG
|
||||
BeKJ10XUlqcNvvkVlzO69DMkHJ/h2NkdcmkCaQyDa0zDw9k4DPNterqok5eY7nPzaQx8ckyRW7ws
|
||||
fBTMQyLtek6Bo3Dc40O5KYxVtZUnxI38ouddefcX7sNM9AgkiYDCueQiWrQQprpa02N/3Pm8e91x
|
||||
0EweFxoUUmmw8tKc0VFMJ5rvL9hYpNGBiLpADOVgdAzGTJ7A1L0cv+8nSKjwvKvQ7wKArZrekqkc
|
||||
9Bkik1zIcnAcX7g+mxhGReJRFd1UY7mMh1U53pMXDi7aceDxYusI+MGOWvLhMsypP7conxaHXrZ6
|
||||
ZUi7z1aF1lMzablEDzAjS+8gKCGjxXi/5mt1m1W0N70HAVDWh/U53Voo27xHT6XA1UN2m57gp4fX
|
||||
+BzUNXdaRmWKVxs7+zpN5g042fAePS1q37gpJ9ymk8E5HC9kVcfYWLqpM0HpBhnNLu7GXy11OUM9
|
||||
2gfU6ZGVTyfxGaKL8RSJUFOUrGjZBbAj/Aur8zEyZvCuop+e4ov7ngB9ndYUusXbwmq2/9Qz/8kC
|
||||
kDLzEo6h5Rn8M5R52MksJvnqtQbTx70CP2xhNE6KzCDP6dRAOSstqqu9nfBQ7GWlMSeDiF895aFk
|
||||
8fATGTsCmrM7iGHYcGjOVhXvJB8n0vMweJAIB0B3Tg/AIghZBdjVHqkPlTBfN++EoFLYLnj/4vtk
|
||||
Pet9Ay4RR8nC7VsgFUqjoP0x34Tw+/wXZYczqGfGjjrORADrpRqC8tUn2H6Kaj7uH1WJwsp2KC7P
|
||||
j3ou5NsTSrqXk0LjJ38e+08Ip/l2o7mbm4OEqpcJp5Gs2GGhnn/7t0LYEJdw3usT4K9ZFKNeMxE9
|
||||
rbctWDeh2qHD/jaR/iHmg3D1LiYsNgql5vqy/XEJYhEN1/SNi+zs+NKmuabw9MjD8J42lE12zMXg
|
||||
8nBK7CD7Msx7hT2RuVgbfIOsZms04hg4e+5Dw8/WGERw1ER0zY57ihspHebEiM0/+v99nuxThH6K
|
||||
1ACohMsOXSLct88CHcdCxcVyqgb6KMonLARo0LBvIaN8sjnCQzM1Yf3VI1HSkAh3ub1SYz+dgPDt
|
||||
P4Bes08PHF7r73zo0NRmGrVcIiczXmwVSSMWcVgQh5GMNC38zlOqK5phMIljBfy9Lxa/XCA5e6lC
|
||||
t7fb4jsklr/m53sGC/9EaWx1cFgHqihgHrQyfJXL21gPCuEAMgUb2+eXkazNoslINfYrvdjucxDe
|
||||
lR3/qc8Ad3391dcGxGiHaPkYOYMlx50HN1vzTDam0oFVvHYmbB8bEM5GGPkzy08lMrQrxXF/2yQU
|
||||
9FkDn/mlCeVX+DCk0SYdhEBzsX21lWRxHjcIh3vzIcr91AzrTplt5NXpjUbWlOfSb15BdXfBp6nO
|
||||
fWHZRRUonwcRX779LorXyoRekJxC3tBtNpHbNoPJ0mB8y93rsCyA60GAi4U0NHTY8rbTGK5ZGVH/
|
||||
oZ395TLyIzyLeMRB+26MtfA1GzWD6BH4Mut6oaUUw+/7oOHGPQOW2EEGKpkryVh5DVtpnulwimUB
|
||||
G/pmmy9tVBGIH9crtu+PiX3MQuWR3116jLNNnYwzFY+I7NcWBzbxGcuGZ4SeRJypq+aeIejtIUbv
|
||||
6t5Rd3l8ADPpGoDjHAzUIlc9Z3Iwx2ir8QMtVmUEy+vDZqBwGsHOJ2vAPMUiB9PXsqfYhPyw4tu7
|
||||
RXGND1TbJq4vmMWwwl8/75PJAGu9W234QDMkgNI4kcSdAeF75+jhvDFlY/HZg0OdM5XUfEu7gV5U
|
||||
4sKwTWUi/Hj4/fRGUIj7B/3Nb3Ze1AqW5aCQ5axZTPz1G6VyiHPhYSbSQdoRuNk8RLx3ihbMNxvF
|
||||
0NWZi1XFnYZ5oyge6JxCpUkpL/XCv28zaA/kRr+8VrP1NvLwurQmVtGtMqh371rgnuuVekYRJGsU
|
||||
wxA25YfSMlY8QwSAb+G96niqmnadLEXoZ5B/t1dsIV/M1xA7MmTi80XV+QzYCELZg+8dXqjx5SFy
|
||||
vJAM8Msg0N37tQHMuug2IqaT0n3s1fm0PwRHOBusI/XB5P15eekcDF24CbeaP//uN2EpgAU7dIuB
|
||||
RNc9gRe+7ak+JG098a3Cwzdv1TQMVT+fuKhToTmFr3DrPrGxzkYgwkfva9R36iVfLXV7BGWYFjQ9
|
||||
tBbgX8McopdStNj2pRbMpp3ocKESpCYN30y8GFYFr43nh9xQYsDGKa/g8e6KWMU3LRFGrYDg3/1o
|
||||
hoOYOZsR5sA+0NKiJGdffQP9qur4YIqeIYALteGaUYJN9fWuV3ZKeViUqKeHzyQPC2RdA8X3NcW6
|
||||
XD6H6WZEOvryPjUzrcoXWm7iH99Q3WpePtHhU//xSXgr9HT47LjTGYnGcqNaRfWcPw4mD6zjpIQC
|
||||
j30g7Q6uDA9yOFO9E9/5CndLBsMYjOGK0kc+X9lwhNzuKVCPbPxByNu0AV9eCuvNxCcfWm4i2F7k
|
||||
OzX7M8+W6Dj3yJN2Dtad3SGZn+HMw3kwSuoiscwXafBseCDZig2XST7ty1SGx7g3sCvDzTA9L+8Z
|
||||
nuNzQv0XODI2PTsPcEZo0n2Q3RL2PKgB5G669u2/zpgLaW1gfH+daDY2qTG/xtwDd8MMfjzgrx5A
|
||||
HBTGuiXbZDsZPV/PFaryzxEfwv49THnLntAy3z4O7fcjXxzXk+H3/7967LLlXYWxYplFgbVmIcaK
|
||||
zeIJbnxzxzuhI2BIpmMA0+1DImttCgm15pMLovgFsdFpGlg/MaggOaRPXIbqkLMJgED+8bB34eyc
|
||||
XVQ/VH48FNzjZVgX+ZD9mW+WdJEHFnW9CtvhmVH9GkmASa8ug1+/HQLJhcm868wGJu/1gLEJjwOf
|
||||
mHcI7MSWqcZfd8mcGJkN+WnrkXZXbgyWvnMd/D7vUG8zRhYZnqHcRAaOBR8wOsihDP0yDkPus62H
|
||||
1agXBf708Os/vv4h0tEnyPd4vy1eNT1EhQgy2DYUf5ZL8meeXrPzHmvsIRprsOljUMRHF2epbOWi
|
||||
mPkqkF7zGe+6ewnmYJBlMHovL9y6VZ1QNppPJSGOTnduo7D5OJgipOjoUO0uUWM95/EIQQcqrEuN
|
||||
MfCPDLu/+sI72zDYvLw8DhUbmeJ7I6U1Pa9hBo2T0RNUqAFg4syN8M7ZI1Fua2OsCWUtbF/nNASh
|
||||
XDPxUM0Q/fjbwdwAmjPZejBiDw1fvnozJ8XkwZARnV7kQGbDYFUr/PF5KL9Vn1fZzoTFs5Dorlfi
|
||||
XAgPNIY/Xryz1xEsT67o4d67x9gpRzcfA4tAyD/SC83zfZr3pza0YQZpQlW51hLhZdgy1O3oju2j
|
||||
a/oizSGE4iX+hNv60eVTvGYcrO39OZTWM84Z6FEFp/l+o6b4PjDSqvQJvzwYbufRzJcIqgX68aS9
|
||||
1bjk8/pEPKyTZ0VN87Mx1q/fRfvLWlDMHaxakLefFcKHXZBNkKyA7fqEg5E74JDzBhkwzcxd4Em+
|
||||
Q8C1rZKZPMoeVq9jjctVCH1JcB/qH971JbfIF4UezqjbrwvZHPwXmF+DHMBqnQtsrbcto40j9JBP
|
||||
YIl3OjkkAksTD5WvLqHRJJhM2Cq0g/tHm1HVCJrkj99dl83r933yUVFZCqvXLaa5Uy/Jui1RCD10
|
||||
k0OlnZXkO18JnEbpQS3kn3P2yEELHlKj0PzbT2x+x2cI/KOMteBwZ0sKjinwOzP49q+f86LNt4r4
|
||||
MN94l8cXn4BjM8PDqhJ868nHp+G6lvA1LykuUtbWKwCHDm1pHoegS/mELcKqwvYw3qijhQb49LXS
|
||||
Qv38QHj/EUWDYPP4RHyWc4S3HrExX6ikw9dNn6jhsovxy9ugYPkaocGJqxc93RQwOXpnaj5RD5ha
|
||||
VEeU0SQLZSSWyeyJ2hF+9FdEtd3ssnFuKhVW+XDEO2mf//zaEUaf9hhuv/5Pcg3TQ87rgkmSqeWw
|
||||
ZmTXg3t/1KmtVh1bV6ewof9QPGzviiTh9Q/3BLp0TrAxArX++jMRcm/nFnLFzAYWkCGA6QtAUtb7
|
||||
GCw+T3n44+v4FKg5I17MweGavb/5EjRGLunPsBDtFYfZoctniQQpRJtRwjmd7JqpOKwgubRNKM2M
|
||||
sMULBPPH16TVLcHojGgolZS+PfI5stH4+nsbvploYV94NMnYLdYRjsJ5T6rooiai4NdHODuuRbGf
|
||||
b2qaKxoH98LFDO0wRWAezXsEJBZgeiHHepjrjojw61+wHyp0WL88AL75Vfj65nnMUPQeHbHq4ETL
|
||||
Jn/piKGiLy/TUJv0gb8GYgHNJOvozmmbhP0+r5YOHr42+Tuf9Q9XwVa4OXTPpZEh5Ju6g/zR2WOv
|
||||
nZV8PXJ5DA9NI2CXlzvjW69H9LqpEw2VMDWWHXc7Kz9/oyreNl+NRlrBfVePpFkmZtw6m4UgV8Mq
|
||||
7HrPHySH35VKmm8wdYrDka1oxSb0NYvHfrXxcnLHIIbF6FrUq7BRM0QeImqeLqTBh9Nz0dakEHZy
|
||||
X1Pn3ZfDYm/VAPTe7NMgO7/9mbNeLRTS7UCd5rkAohH9qLyrW0exqvYG2zfZDL95cUhOVyFZbbES
|
||||
EUVnh+rTbBsSdy56cOhs7ad/hnArahtyZ45gqx4sNivXoQJom9r0ui/2QMwcaVQoqzxazplSM+TE
|
||||
OnijXqeWMvrf/EZL0bfffvyfSMjJdKi825Ba7BSwxamYin55cL4GB7bcY9QqIO10fJGDlK22JgUQ
|
||||
lKJJvV3k+WJfzRHsHFr+8orhlzduXb27Ev6wUmOR4isHg/u7CuUn2ydsOtlH8PmQCxGa91zP7nrS
|
||||
oXoWJqyXnliT/pEEf/TBG7p9It03F/NPPaLDEfof2NozLMaYxxq7ewnF7JRC/0hpKKRnNqzumGaw
|
||||
0kiH9/PTZu2Zf83QRf02lHj3kQ/318eEvKDH2IhfLvtYhzn9w+96gLCxfMjEQTsxZfzl8YQv5FMF
|
||||
O7mr6a7JnZx1WdOClO3tbz3U9RhtphEuenIiYn5/Gn/yiuVT+2Tzjkdj3U1lBMEpeYXcxh6M+YIi
|
||||
EU1tNeOfP18e54cCz3T1Kf7q2S+/QnvhZOLd+3Vny/4DS3ASjB1ZyPuSzK2iiD9/EM7eWzFm8264
|
||||
qDSn8I/fX7GxbYGcFRY9hyli5Jt/QFfvr9gEj3lYVXutgO91Cb7nhgLGK1MzeMg6FyeX+7tezqXE
|
||||
wWPK8TgsZlaTV8oTcL+pMtVL7zxMDf/2lOWTFlhttHdOXiteYdJc+VDBVPDXZml4SA7ZE+8LXR66
|
||||
LD+78LmWI3UPrcXWzxg2kEHpTIOoOSWsePAz3GmeTeD391U/ZQE0ymZHz1f9CdaptTIkR6tElmNp
|
||||
JfypOhTocnEXsj++n4zB7S2EsnGbQp5Tc8D377sIhbhR6KXfLMZv3wEHax7pbk1gwurtSYbXqIrw
|
||||
7hIN+eqOUQom7HnY06TKYNZ88iBPtC01DrqbzPwnDhTQrQs2dzViU+z7LqBFvYTvcuzy1YnOMhgk
|
||||
hyOMFZdkdS21RT7gRWyRnBij4z0j+NVPfOYWM18Al0Po6kAi1Ll0xuxeNQiA9ulxoCQi+FjmuUV4
|
||||
z24hnezd1++7PGw33imUEz/214MSqODCZe9QENYgX4U1DeAj+NYvSrX8l3fLwD/LWO2fpbEcO71C
|
||||
uz3rqG5qTU0Kf9Sh5B5v9Ld/WM7lSf3pb/j4zvs/vClfrI5+87VE0gZVRYeGNmT0ZmlY7nHcwcl6
|
||||
D1j75n2ren92QIoPHrYGE4PVKfP+D//iiml+t13zHshcSL/1niZrDVCs7OTnEE4VvOTUfj1mcDg4
|
||||
H+wVymIsm4MOoWzcJ/zL0+a+hQq43O5PatySVz4tz7oBOy0WiPIcdcAnxctTfvnsrlfW5E++8C4I
|
||||
wvsKSvl4r2EPm6e6p0fwiAbRaDarYuP3gA1SaoakFDcRJqAC2C6umiG89Boi9tnnNBYcM5fCBhHQ
|
||||
r+5CHV/Xkt8+YKuepQk7n2Ie5msWRT++/PESWPpaaaAblGfqB3fozxLUPGRXqR7O+L43WOMIHfTu
|
||||
IML7c6gwejxhHTZlesA72XgOYyEpDTRgUuFgVUa2lOZNhoQTM9IluzZfKtcLlWu0EGqfX3VOu0zr
|
||||
0LnwImo0BBizeG9GtJy2ETVntwXLhK8EZndvoHrpekBYQE5gQiydqo2RGBIbzQqKO36mh5P/b96B
|
||||
mrw5UX9+W8O6VmYIvnpE02/eNnx58c/z9JtzNzSa7q/w2kQKtj4P/K3vhIP3XYq++y/69XfEg0ex
|
||||
LsKNqvb+4tJshDoaTIpzI2OEUw35l3dRYxt+8l8+BYe5L4nwuRs1PyNgw03e30KkNwmb1WDjgXJT
|
||||
Rli/hqO/vlI4wjqpql8+nSzhXTPB5yPYf3hysWMxUr7zGtve0U7EUwBU8Nv/ubwq+GtnyArK38UZ
|
||||
7yGrweJS1MFpPhNsDVBndFZfJQzxYSbbcyony6gqKgxxMmNnKmHOGvKswDev+O7/rEECPXrCctNu
|
||||
qJahOV8e7uTBVV308HFLTZ+f4qMK34UwYlyj7UCeR72CaEMkrJmyAFh2e1VQsHYadi6XE6Cn2ljR
|
||||
d/8UVviRAvbdb6KwCsQ/ebzIHEmF+40aU1V+7Q3GnbYEqOcWf/1A+W99+voN7KWylZB9qKswdMWI
|
||||
qht9TBizLh3Ye+cau7b8ydnJyzv41R9sSvcGzD8/7aHTjkST0IDhu39VNptaJO/fPBUv6oigIl+p
|
||||
fxy1WsjbqIHwYRZf3hj8oZc0FelXOcMnh9/m62/fsNnure/+McwXa/AL8N2X/clj6VACEXzrC5/k
|
||||
gzTQildX+GGD+eVFMxHmoy8rbk321K7yEbBNc8hgqus11me8JJ++XtvffhLfaOcwAaCtDj6RtsN7
|
||||
2r3BfHIeZ5RPXYpDVaDDeEfOCO99ufzqxV9tzDj4mLo9xWdnzv/4DS1PJxxGd7FerSxI4d+/UwH/
|
||||
9a+//vpfvxMGbXcrX9+DAVO5TP/x30cF/kP6j7HNXq8/xxDImFXl3//8+wTC35+haz/T/566pnyP
|
||||
f//zl4T+nDX4e+qm7PX/Xv/X96P+61//BwAA//8DAFT8PaLgIAAA
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8e9483a10d7c967f-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 27 Nov 2024 19:27:11 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-allow-origin:
|
||||
- '*'
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-model:
|
||||
- text-embedding-3-small
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '68'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '10000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '9999953'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_09708939ca92f32d9d7143e8b7843b12
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Information Agent.
|
||||
You have access to specific knowledge sources.\nYour personal goal is: Provide
|
||||
@@ -9,9 +420,10 @@ interactions:
|
||||
depends on it!"}, {"role": "user", "content": "\nCurrent Task: What is Brandon''s
|
||||
favorite color?\n\nThis is the expect criteria for your final answer: Brandon''s
|
||||
favorite color.\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
|
||||
"gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
|
||||
not a summary.Additional Information: Brandon''s favorite color is blue and
|
||||
he likes Mexican food.\n\nBegin! This is VERY important to you, use the tools
|
||||
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
|
||||
"model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -20,7 +432,7 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '931'
|
||||
- '1014'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
@@ -50,19 +462,19 @@ interactions:
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xSQW7bMBC86xULXnqxAtmxI1e3FEWBtJekCXJpC4GmVhIdapcgqbhN4L8HlB1L
|
||||
QVOgFwGa2RnOLPmcAAhdiQKEamVQnTXp5f3d9lsdbndh++C+757Or6/bm6uvn59WH/FGzKKCN1tU
|
||||
4VV1prizBoNmOtDKoQwYXef5+SK7WK3ny4HouEITZY0N6ZLTTpNOF9limWZ5Ol8f1S1rhV4U8CMB
|
||||
AHgevjEnVfhbFJDNXpEOvZcNiuI0BCAcm4gI6b32QVIQs5FUTAFpiH4FxDtQkqDRjwgSmhgbJPkd
|
||||
OoCf9EWTNHA5/BfwyUmqmD54qOUjOx0QFBt2oD1sTI9n02Mc1r2XsSr1xhzx/Sm34cY63vgjf8Jr
|
||||
Tdq3pUPpmWJGH9iKgd0nAL+G/fRvKgvruLOhDPyAFA3nF/nBT4zXMmHXRzJwkGaKr2bv+JUVBqmN
|
||||
n2xYKKlarEbpeB2yrzRPiGTS+u8073kfmmtq/sd+JJRCG7AqrcNKq7eNxzGH8dX+a+y05SGw8H98
|
||||
wK6sNTXorNOHN1PbMsuz1aZe5yoTyT55AQAA//8DAPaYLdRBAwAA
|
||||
H4sIAAAAAAAAA4xSwY7TMBS85yuefOHSrNJ0l1S5bVdCLHAHBChy7Zf0geNnbGeX1ar/jpxmm1SA
|
||||
xCVSZt6MZ579nAEI0qIGoQ4yqt6Z/Pbz26efjNXW+0/64917r82H3W64f9fpOxKrpOD9d1TxRXWl
|
||||
uHcGI7E90cqjjJhc19WmrDY3rzflSPSs0SRZ52J+zXlPlvKyKK/zosrX20l9YFIYRA1fMgCA5/Gb
|
||||
clqNv0QNxeoF6TEE2aGoz0MAwrNJiJAhUIjSRrGaScU2oh2j34PlR1DSQkcPCBK6FBukDY/oAb7a
|
||||
N2Slgdvxv4adl1azfRWglQ/sKSIoNuyBAuzNgFfLYzy2Q5Cpqh2MmfDjObfhznneh4k/4y1ZCofG
|
||||
owxsU8YQ2YmRPWYA38b9DBeVhfPcu9hE/oE2Ga635clPzNeyZCcycpRmxsti2uqlX6MxSjJhsWGh
|
||||
pDqgnqXzdchBEy+IbNH6zzR/8z41J9v9j/1MKIUuom6cR03qsvE85jG92n+Nnbc8BhbhKUTsm5Zs
|
||||
h955Or2Z1jVFVdzs222lCpEds98AAAD//wMAfDYBg0EDAAA=
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8e54a2a7d81467f7-SJC
|
||||
- 8e9483a44b2fcf51-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -70,14 +482,14 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Nov 2024 01:23:34 GMT
|
||||
- Wed, 27 Nov 2024 19:27:12 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=DoHo1Z11nN9bxkwZmJGnaxRhyrWE0UfyimYuUVRU6A4-1732065814-1.0.1.1-JVRvFrIJLHEq9OaFQS0qcgYcawE7t2XQ4Tpqd58n2Yfx3mvEqD34MJmooi1LtvdvjB2J8x1Rs.rCdXD.msLlKw;
|
||||
path=/; expires=Wed, 20-Nov-24 01:53:34 GMT; domain=.api.openai.com; HttpOnly;
|
||||
- __cf_bm=pBzYx.9r7fU6srtt2lLWBrgojr5QFAfVuDKoOwUKCK4-1732735632-1.0.1.1-jYgG33D0s.RUVr6OV4fPXS7bQR9Yp5AwbbIAqdxaZCrcisNIYqPqOqxNO9.Lo3Ok7K8FXfSBrrnAOOJDVLa6bA;
|
||||
path=/; expires=Wed, 27-Nov-24 19:57:12 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=n3RrNhFMqC3HtJ7n3e3agyxnM1YOQ6eKESz_eeXLtZA-1732065814630-0.0.1.1-604800000;
|
||||
- _cfuvid=TYAi3OpktKJu15t1e4y3VbRnbHK6QYaCeSYJuT6e5Sk-1732735632634-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
@@ -90,7 +502,7 @@ interactions:
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '344'
|
||||
- '535'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -102,13 +514,13 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999790'
|
||||
- '149999769'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_8f1622677c64913753a595f679596614
|
||||
- req_8501f29c09575f05c51fdec5c1c36090
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
@@ -43,10 +43,11 @@ class TestAuthenticationCommand(unittest.TestCase):
        mock_print.assert_any_call("2. Enter the following code: ", "ABCDEF")
        mock_open.assert_called_once_with("https://example.com")

    @patch("crewai.cli.authentication.main.ToolCommand")
    @patch("crewai.cli.authentication.main.requests.post")
    @patch("crewai.cli.authentication.main.validate_token")
    @patch("crewai.cli.authentication.main.console.print")
    def test_poll_for_token_success(self, mock_print, mock_validate_token, mock_post):
    def test_poll_for_token_success(self, mock_print, mock_validate_token, mock_post, mock_tool):
        mock_response = MagicMock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
@@ -55,10 +56,13 @@ class TestAuthenticationCommand(unittest.TestCase):
        }
        mock_post.return_value = mock_response

        mock_instance = mock_tool.return_value
        mock_instance.login.return_value = None

        self.auth_command._poll_for_token({"device_code": "123456"})

        mock_validate_token.assert_called_once_with("TOKEN")
        mock_print.assert_called_once_with("\nWelcome to CrewAI+ !!", style="green")
        mock_print.assert_called_once_with("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")

    @patch("crewai.cli.authentication.main.requests.post")
    @patch("crewai.cli.authentication.main.console.print")

@@ -260,6 +260,6 @@ class TestDeployCommand(unittest.TestCase):
        self.assertEqual(project_name, "test_project")

    def test_get_crewai_version(self):
        from crewai.cli.utils import get_crewai_version
        from crewai.cli.version import get_crewai_version

        assert isinstance(get_crewai_version(), str)
@@ -1,4 +1,5 @@
from typing import Callable

from crewai.tools import BaseTool, tool


@@ -21,8 +22,7 @@ def test_creating_a_tool_using_annotation():
        my_tool.func("What is the meaning of life?") == "What is the meaning of life?"
    )

    # Assert the langchain tool conversion worked as expected
    converted_tool = my_tool.to_langchain()
    converted_tool = my_tool.to_structured_tool()
    assert converted_tool.name == "Name of my tool"

    assert (
@@ -41,9 +41,7 @@ def test_creating_a_tool_using_annotation():
def test_creating_a_tool_using_baseclass():
    class MyCustomTool(BaseTool):
        name: str = "Name of my tool"
        description: str = (
            "Clear description for what this tool is useful for, you agent will need this information to use it."
        )
        description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."

        def _run(self, question: str) -> str:
            return question
@@ -61,8 +59,7 @@ def test_creating_a_tool_using_baseclass():
    }
    assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?"

    # Assert the langchain tool conversion worked as expected
    converted_tool = my_tool.to_langchain()
    converted_tool = my_tool.to_structured_tool()
    assert converted_tool.name == "Name of my tool"

    assert (
@@ -73,7 +70,7 @@ def test_creating_a_tool_using_baseclass():
        "question": {"title": "Question", "type": "string"}
    }
    assert (
        converted_tool.run("What is the meaning of life?")
        converted_tool._run("What is the meaning of life?")
        == "What is the meaning of life?"
    )

@@ -81,9 +78,7 @@
def test_setting_cache_function():
    class MyCustomTool(BaseTool):
        name: str = "Name of my tool"
        description: str = (
            "Clear description for what this tool is useful for, you agent will need this information to use it."
        )
        description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
        cache_function: Callable = lambda: False

        def _run(self, question: str) -> str:
@@ -97,9 +92,7 @@
def test_default_cache_function_is_true():
    class MyCustomTool(BaseTool):
        name: str = "Name of my tool"
        description: str = (
            "Clear description for what this tool is useful for, you agent will need this information to use it."
        )
        description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."

        def _run(self, question: str) -> str:
            return question
146 tests/tools/test_structured_tool.py Normal file
@@ -0,0 +1,146 @@
from typing import Optional

import pytest
from pydantic import BaseModel, Field

from crewai.tools.structured_tool import CrewStructuredTool


# Test fixtures
@pytest.fixture
def basic_function():
    def test_func(param1: str, param2: int = 0) -> str:
        """Test function with basic params."""
        return f"{param1} {param2}"

    return test_func


@pytest.fixture
def schema_class():
    class TestSchema(BaseModel):
        param1: str
        param2: int = Field(default=0)

    return TestSchema


class TestCrewStructuredTool:
    def test_initialization(self, basic_function, schema_class):
        """Test basic initialization of CrewStructuredTool"""
        tool = CrewStructuredTool(
            name="test_tool",
            description="Test tool description",
            func=basic_function,
            args_schema=schema_class,
        )

        assert tool.name == "test_tool"
        assert tool.description == "Test tool description"
        assert tool.func == basic_function
        assert tool.args_schema == schema_class

    def test_from_function(self, basic_function):
        """Test creating tool from function"""
        tool = CrewStructuredTool.from_function(
            func=basic_function, name="test_tool", description="Test description"
        )

        assert tool.name == "test_tool"
        assert tool.description == "Test description"
        assert tool.func == basic_function
        assert isinstance(tool.args_schema, type(BaseModel))

    def test_validate_function_signature(self, basic_function, schema_class):
        """Test function signature validation"""
        tool = CrewStructuredTool(
            name="test_tool",
            description="Test tool",
            func=basic_function,
            args_schema=schema_class,
        )

        # Should not raise any exceptions
        tool._validate_function_signature()

    @pytest.mark.asyncio
    async def test_ainvoke(self, basic_function):
        """Test asynchronous invocation"""
        tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")

        result = await tool.ainvoke(input={"param1": "test"})
        assert result == "test 0"

    def test_parse_args_dict(self, basic_function):
        """Test parsing dictionary arguments"""
        tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")

        parsed = tool._parse_args({"param1": "test", "param2": 42})
        assert parsed["param1"] == "test"
        assert parsed["param2"] == 42

    def test_parse_args_string(self, basic_function):
        """Test parsing string arguments"""
        tool = CrewStructuredTool.from_function(func=basic_function, name="test_tool")

        parsed = tool._parse_args('{"param1": "test", "param2": 42}')
        assert parsed["param1"] == "test"
        assert parsed["param2"] == 42

    def test_complex_types(self):
        """Test handling of complex parameter types"""

        def complex_func(nested: dict, items: list) -> str:
            """Process complex types."""
            return f"Processed {len(items)} items with {len(nested)} nested keys"

        tool = CrewStructuredTool.from_function(
            func=complex_func, name="test_tool", description="Test complex types"
        )
        result = tool.invoke({"nested": {"key": "value"}, "items": [1, 2, 3]})
        assert result == "Processed 3 items with 1 nested keys"

    def test_schema_inheritance(self):
        """Test tool creation with inherited schema"""

        def extended_func(base_param: str, extra_param: int) -> str:
            """Test function with inherited schema."""
            return f"{base_param} {extra_param}"

        class BaseSchema(BaseModel):
            base_param: str

        class ExtendedSchema(BaseSchema):
            extra_param: int

        tool = CrewStructuredTool.from_function(
            func=extended_func, name="test_tool", args_schema=ExtendedSchema
        )

        result = tool.invoke({"base_param": "test", "extra_param": 42})
        assert result == "test 42"

    def test_default_values_in_schema(self):
        """Test handling of default values in schema"""

        def default_func(
            required_param: str,
            optional_param: str = "default",
            nullable_param: Optional[int] = None,
        ) -> str:
            """Test function with default values."""
            return f"{required_param} {optional_param} {nullable_param}"

        tool = CrewStructuredTool.from_function(
            func=default_func, name="test_tool", description="Test defaults"
        )

        # Test with minimal parameters
        result = tool.invoke({"required_param": "test"})
        assert result == "test default None"

        # Test with all parameters
        result = tool.invoke(
            {"required_param": "test", "optional_param": "custom", "nullable_param": 42}
        )
        assert result == "test custom 42"

@@ -1,7 +1,10 @@
import json
from typing import Dict, List, Optional
from unittest.mock import MagicMock, Mock, patch

import pytest
from pydantic import BaseModel

from crewai.llm import LLM
from crewai.utilities.converter import (
    Converter,
@@ -9,12 +12,11 @@ from crewai.utilities.converter import (
    convert_to_model,
    convert_with_instructions,
    create_converter,
    generate_model_description,
    get_conversion_instructions,
    handle_partial_json,
    validate_model,
)
from pydantic import BaseModel

from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser


@@ -269,3 +271,45 @@ def test_create_converter_fails_without_agent_or_converter_cls():
        create_converter(
            llm=Mock(), text="Sample", model=SimpleModel, instructions="Convert"
        )


def test_generate_model_description_simple_model():
    description = generate_model_description(SimpleModel)
    expected_description = '{\n "name": str,\n "age": int\n}'
    assert description == expected_description


def test_generate_model_description_nested_model():
    description = generate_model_description(NestedModel)
    expected_description = (
        '{\n "id": int,\n "data": {\n "name": str,\n "age": int\n}\n}'
    )
    assert description == expected_description


def test_generate_model_description_optional_field():
    class ModelWithOptionalField(BaseModel):
        name: Optional[str]
        age: int

    description = generate_model_description(ModelWithOptionalField)
    expected_description = '{\n "name": Optional[str],\n "age": int\n}'
    assert description == expected_description


def test_generate_model_description_list_field():
    class ModelWithListField(BaseModel):
        items: List[int]

    description = generate_model_description(ModelWithListField)
    expected_description = '{\n "items": List[int]\n}'
    assert description == expected_description


def test_generate_model_description_dict_field():
    class ModelWithDictField(BaseModel):
        attributes: Dict[str, int]

    description = generate_model_description(ModelWithDictField)
    expected_description = '{\n "attributes": Dict[str, int]\n}'
    assert description == expected_description
2 uv.lock generated
@@ -619,7 +619,6 @@ dependencies = [
    { name = "instructor" },
    { name = "json-repair" },
    { name = "jsonref" },
    { name = "langchain" },
    { name = "litellm" },
    { name = "openai" },
    { name = "openpyxl" },
@@ -692,7 +691,6 @@ requires-dist = [
    { name = "instructor", specifier = ">=1.3.3" },
    { name = "json-repair", specifier = ">=0.25.2" },
    { name = "jsonref", specifier = ">=1.1.0" },
    { name = "langchain", specifier = ">=0.2.16" },
    { name = "litellm", specifier = ">=1.44.22" },
    { name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=0.1.29" },
    { name = "openai", specifier = ">=1.13.3" },