Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-18 13:28:31 +00:00

Compare commits (1 commit): feat/impro... ... bugfix/add...

| Author | SHA1 | Date |
|---|---|---|
| | 54acbc9d0e | |

.github/workflows/tests.yml (vendored), 44 changed lines
@@ -1,32 +1,60 @@
 name: Run Tests

-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - main

 permissions:
   contents: write

-env:
-  OPENAI_API_KEY: fake-api-key
-
 jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
+    env:
+      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+      MODEL: gpt-4o-mini
     steps:
       - name: Checkout code
         uses: actions/checkout@v4

-      - name: Install uv
+      - name: Install UV
         uses: astral-sh/setup-uv@v3
         with:
           enable-cache: true

       - name: Set up Python
         run: uv python install 3.12.8

       - name: Install the project
         run: uv sync --dev --all-extras

-      - name: Run tests
-        run: uv run pytest tests -vv
+      - name: Run General Tests
+        run: uv run pytest tests -k "not main_branch_tests" -vv
+
+  main_branch_tests:
+    if: github.ref == 'refs/heads/main'
+    runs-on: ubuntu-latest
+    needs: tests
+    timeout-minutes: 15
+    env:
+      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Install UV
+        uses: astral-sh/setup-uv@v3
+        with:
+          enable-cache: true
+
+      - name: Set up Python
+        run: uv python install 3.12.8
+
+      - name: Install the project
+        run: uv sync --dev --all-extras
+
+      - name: Run Main Branch Specific Tests
+        run: uv run pytest tests/main_branch_tests -vv
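A quick illustration of how that `-k "not main_branch_tests"` selection splits the suite; the file paths and tests below are hypothetical, not taken from the repository:

```python
# tests/main_branch_tests/test_release_checks.py (hypothetical path)
def test_main_branch_tests_release_smoke():
    # Deselected by `pytest tests -k "not main_branch_tests"` because the keyword
    # appears in the test name; picked up directly by the second job via
    # `pytest tests/main_branch_tests -vv`.
    assert True


# tests/test_math_utils.py (hypothetical path)
def test_addition_is_commutative():
    # Matches no exclusion, so it runs in the general job.
    assert 1 + 2 == 2 + 1
```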
@@ -31,7 +31,7 @@ From this point on, your crew will have planning enabled, and the tasks will be
 #### Planning LLM

-Now you can define the LLM that will be used to plan the tasks.
+Now you can define the LLM that will be used to plan the tasks. You can use any ChatOpenAI LLM model available.

 When running the base case example, you will see something like the output below, which represents the output of the `AgentPlanner`
 responsible for creating the step-by-step logic to add to the Agents' tasks.
@@ -39,6 +39,7 @@ responsible for creating the step-by-step logic to add to the Agents' tasks.
 <CodeGroup>
 ```python Code
 from crewai import Crew, Agent, Task, Process
+from langchain_openai import ChatOpenAI

 # Assemble your crew with planning capabilities and custom LLM
 my_crew = Crew(
@@ -46,7 +47,7 @@ my_crew = Crew(
     tasks=self.tasks,
     process=Process.sequential,
     planning=True,
-    planning_llm="gpt-4o"
+    planning_llm=ChatOpenAI(model="gpt-4o")
 )

 # Run the crew
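Outside the diff, a minimal self-contained sketch of planning mode as configured in this hunk; the agent, task, and topic are illustrative, and the plain string form of `planning_llm` is the left-hand style while the right-hand side passes the `ChatOpenAI` instance shown above:

```python
from crewai import Agent, Crew, Process, Task

# Illustrative agent and task; only the planning options mirror the hunk above.
analyst = Agent(
    role="Research Analyst",
    goal="Summarize a topic",
    backstory="Illustrative agent for the planning sketch.",
)
summary = Task(
    description="Summarize the topic in one paragraph.",
    expected_output="One concise paragraph.",
    agent=analyst,
)

my_crew = Crew(
    agents=[analyst],
    tasks=[summary],
    process=Process.sequential,
    planning=True,
    planning_llm="gpt-4o",  # left-hand form; the right-hand side uses ChatOpenAI(model="gpt-4o")
)
result = my_crew.kickoff()
```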
@@ -23,7 +23,9 @@ Processes enable individual agents to operate as a cohesive unit, streamlining t
 To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. For a hierarchical process, ensure to define `manager_llm` or `manager_agent` for the manager agent.

 ```python
-from crewai import Crew, Process
+from crewai import Crew
+from crewai.process import Process
+from langchain_openai import ChatOpenAI

 # Example: Creating a crew with a sequential process
 crew = Crew(
@@ -38,7 +40,7 @@ crew = Crew(
     agents=my_agents,
     tasks=my_tasks,
     process=Process.hierarchical,
-    manager_llm="gpt-4o"
+    manager_llm=ChatOpenAI(model="gpt-4")
     # or
     # manager_agent=my_manager_agent
 )
@@ -150,20 +150,15 @@ There are two main ways for one to create a CrewAI tool:

 ```python Code
 from crewai.tools import BaseTool
-from pydantic import BaseModel, Field

-class MyToolInput(BaseModel):
-    """Input schema for MyCustomTool."""
-    argument: str = Field(..., description="Description of the argument.")

 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
-    description: str = "What this tool does. It's vital for effective utilization."
-    args_schema: Type[BaseModel] = MyToolInput
+    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."

     def _run(self, argument: str) -> str:
-        # Your tool's logic here
-        return "Tool's result"
+        # Implementation goes here
+        return "Result from custom tool"
 ```

 ### Utilizing the `tool` Decorator
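As a usage note for the simplified `MyCustomTool` on the right-hand side of this hunk, a hedged sketch of wiring it into an agent (the role, goal, and backstory strings are made up):

```python
from crewai import Agent

# Assumes MyCustomTool from the hunk above is defined or importable in this module.
researcher = Agent(
    role="Research Analyst",
    goal="Answer questions with the custom tool",
    backstory="Illustrative agent used only to show how a BaseTool subclass is attached.",
    tools=[MyCustomTool()],
)
```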
@@ -73,9 +73,9 @@ result = crew.kickoff()
 If you're using the hierarchical process and don't want to set a custom manager agent, you can specify the language model for the manager:

 ```python Code
-from crewai import LLM
+from langchain_openai import ChatOpenAI

-manager_llm = LLM(model="gpt-4o")
+manager_llm = ChatOpenAI(model_name="gpt-4")

 crew = Crew(
     agents=[researcher, writer],
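For completeness, a small self-contained sketch of a hierarchical crew using the manager LLM setting this hunk edits; the agents and task are placeholders, and the plain string form of `manager_llm` is the left-hand style while the right-hand side passes a `ChatOpenAI` instance:

```python
from crewai import Agent, Crew, Process, Task

researcher = Agent(role="Researcher", goal="Find facts", backstory="Placeholder agent.")
writer = Agent(role="Writer", goal="Write a short report", backstory="Placeholder agent.")
report = Task(
    description="Write a one-paragraph report from the researcher's notes.",
    expected_output="A short report.",
)

crew = Crew(
    agents=[researcher, writer],
    tasks=[report],
    process=Process.hierarchical,
    manager_llm="gpt-4o",  # or manager_agent=..., as the surrounding docs note
)
result = crew.kickoff()
```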
@@ -301,166 +301,38 @@ Use the annotations to properly reference the agent and task in the `crew.py` fi

 ### Annotations include:

-Here are examples of how to use each annotation in your CrewAI project, and when you should use them:
+* `@agent`
+* `@task`
+* `@crew`
+* `@tool`
+* `@before_kickoff`
+* `@after_kickoff`
+* `@callback`
+* `@output_json`
+* `@output_pydantic`
+* `@cache_handler`

-#### @agent
-Used to define an agent in your crew. Use this when:
-- You need to create a specialized AI agent with a specific role
-- You want the agent to be automatically collected and managed by the crew
-- You need to reuse the same agent configuration across multiple tasks
-
-```python
+```python crew.py
+# ...
 @agent
-def research_agent(self) -> Agent:
+def email_summarizer(self) -> Agent:
     return Agent(
-        role="Research Analyst",
-        goal="Conduct thorough research on given topics",
-        backstory="Expert researcher with years of experience in data analysis",
-        tools=[SerperDevTool()],
-        verbose=True
+        config=self.agents_config["email_summarizer"],
     )
-```
-
-#### @task
-Used to define a task that can be executed by agents. Use this when:
-- You need to define a specific piece of work for an agent
-- You want tasks to be automatically sequenced and managed
-- You need to establish dependencies between different tasks
-
-```python
 @task
-def research_task(self) -> Task:
+def email_summarizer_task(self) -> Task:
     return Task(
-        description="Research the latest developments in AI technology",
-        expected_output="A comprehensive report on AI advancements",
-        agent=self.research_agent(),
-        output_file="output/research.md"
+        config=self.tasks_config["email_summarizer_task"],
     )
+# ...
 ```

-#### @crew
-Used to define your crew configuration. Use this when:
-- You want to automatically collect all @agent and @task definitions
-- You need to specify how tasks should be processed (sequential or hierarchical)
-- You want to set up crew-wide configurations
-
-```python
-@crew
-def research_crew(self) -> Crew:
-    return Crew(
-        agents=self.agents,  # Automatically collected from @agent methods
-        tasks=self.tasks,    # Automatically collected from @task methods
-        process=Process.sequential,
-        verbose=True
-    )
-```
-
-#### @tool
-Used to create custom tools for your agents. Use this when:
-- You need to give agents specific capabilities (like web search, data analysis)
-- You want to encapsulate external API calls or complex operations
-- You need to share functionality across multiple agents
-
-```python
-@tool
-def web_search_tool(query: str, max_results: int = 5) -> list[str]:
-    """
-    Search the web for information.
-
-    Args:
-        query: The search query
-        max_results: Maximum number of results to return
-
-    Returns:
-        List of search results
-    """
-    # Implement your search logic here
-    return [f"Result {i} for: {query}" for i in range(max_results)]
-```
-
-#### @before_kickoff
-Used to execute logic before the crew starts. Use this when:
-- You need to validate or preprocess input data
-- You want to set up resources or configurations before execution
-- You need to perform any initialization logic
-
-```python
-@before_kickoff
-def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
-    """Validate and preprocess inputs before the crew starts."""
-    if inputs is None:
-        return None
-
-    if 'topic' not in inputs:
-        raise ValueError("Topic is required")
-
-    # Add additional context
-    inputs['timestamp'] = datetime.now().isoformat()
-    inputs['topic'] = inputs['topic'].strip().lower()
-    return inputs
-```
-
-#### @after_kickoff
-Used to process results after the crew completes. Use this when:
-- You need to format or transform the final output
-- You want to perform cleanup operations
-- You need to save or log the results in a specific way
-
-```python
-@after_kickoff
-def process_results(self, result: CrewOutput) -> CrewOutput:
-    """Process and format the results after the crew completes."""
-    result.raw = result.raw.strip()
-    result.raw = f"""
-    # Research Results
-    Generated on: {datetime.now().isoformat()}
-
-    {result.raw}
-    """
-    return result
-```
-
-#### @callback
-Used to handle events during crew execution. Use this when:
-- You need to monitor task progress
-- You want to log intermediate results
-- You need to implement custom progress tracking or metrics
-
-```python
-@callback
-def log_task_completion(self, task: Task, output: str):
-    """Log task completion details for monitoring."""
-    print(f"Task '{task.description}' completed")
-    print(f"Output length: {len(output)} characters")
-    print(f"Agent used: {task.agent.role}")
-    print("-" * 50)
-```
-
-#### @cache_handler
-Used to implement custom caching for task results. Use this when:
-- You want to avoid redundant expensive operations
-- You need to implement custom cache storage or expiration logic
-- You want to persist results between runs
-
-```python
-@cache_handler
-def custom_cache(self, key: str) -> Optional[str]:
-    """Custom cache implementation for storing task results."""
-    cache_file = f"cache/{key}.json"
-
-    if os.path.exists(cache_file):
-        with open(cache_file, 'r') as f:
-            data = json.load(f)
-            # Check if cache is still valid (e.g., not expired)
-            if datetime.fromisoformat(data['timestamp']) > datetime.now() - timedelta(days=1):
-                return data['result']
-    return None
-```
-
-<Note>
-These decorators are part of the CrewAI framework and help organize your crew's structure by automatically collecting agents, tasks, and handling various lifecycle events.
-They should be used within a class decorated with `@CrewBase`.
-</Note>
+<Tip>
+In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
+which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
+You can learn more about the core concepts [here](/concepts).
+</Tip>

 ### Replay Tasks from Latest Crew Kickoff
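For orientation, a minimal `@CrewBase` skeleton showing where the decorated `email_summarizer` methods from the right-hand side live; it assumes `agents.yaml`/`tasks.yaml` files defining those keys, which are not shown in this diff:

```python
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task


@CrewBase
class EmailCrew:
    """Sketch only: relies on config files defining the referenced keys."""

    @agent
    def email_summarizer(self) -> Agent:
        return Agent(config=self.agents_config["email_summarizer"])

    @task
    def email_summarizer_task(self) -> Task:
        return Task(config=self.tasks_config["email_summarizer_task"])

    @crew
    def crew(self) -> Crew:
        # Agents and tasks are collected automatically from the decorated methods.
        return Crew(agents=self.agents, tasks=self.tasks, process=Process.sequential)
```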
@@ -86,7 +86,7 @@ class Agent(BaseAgent):
     llm: Union[str, InstanceOf[LLM], Any] = Field(
         description="Language model that will run the agent.", default=None
     )
-    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
+    function_calling_llm: Optional[Any] = Field(
         description="Language model that will run the agent.", default=None
     )
     system_template: Optional[str] = Field(
@@ -142,8 +142,7 @@ class Agent(BaseAgent):
         self.agent_ops_agent_name = self.role

         self.llm = create_llm(self.llm)
-        if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
-            self.function_calling_llm = create_llm(self.function_calling_llm)
+        self.function_calling_llm = create_llm(self.function_calling_llm)

         if not self.agent_executor:
             self._setup_agent_executor()
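As a usage-level note on these fields, a hedged sketch of the forms accepted on the left-hand side of the first hunk (model names are illustrative):

```python
from crewai import LLM, Agent

# Per the left-hand annotations, both fields accept a model-name string or an LLM
# instance, and the second hunk normalizes them through create_llm() at init time.
agent = Agent(
    role="Research Analyst",
    goal="Answer questions",
    backstory="Illustrative agent.",
    llm="gpt-4o",                                    # plain string form
    function_calling_llm=LLM(model="gpt-4o-mini"),   # explicit LLM instance
)
```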
@@ -145,6 +145,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 if self._is_context_length_exceeded(e):
                     self._handle_context_length()
                     continue
+                else:
+                    raise e

             self._show_logs(formatted_answer)
             return formatted_answer
@@ -314,7 +316,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             agent=self.agent,
             action=agent_action,
         )
-        tool_calling = tool_usage.parse_tool_calling(agent_action.text)
+        tool_calling = tool_usage.parse(agent_action.text)

         if isinstance(tool_calling, ToolUsageErrorException):
             tool_result = tool_calling.message
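The first hunk narrows the retry loop so that only context-length failures are retried; a generic standalone sketch of that control flow (the names are illustrative, not the executor's API):

```python
def call_with_context_retry(call, is_context_length_exceeded, handle_context_length):
    """Retry `call` only when the error is a context-length problem; re-raise otherwise."""
    while True:
        try:
            return call()
        except Exception as exc:
            if is_context_length_exceeded(exc):
                handle_context_length()  # e.g. trim or summarize the prompt, then retry
                continue
            raise
```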
@@ -47,7 +47,6 @@ from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
     aggregate_raw_outputs_from_tasks,
 )
-from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.planning_handler import CrewPlanner
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -150,7 +149,7 @@ class Crew(BaseModel):
     manager_agent: Optional[BaseAgent] = Field(
         description="Custom agent that will be used as manager.", default=None
     )
-    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
+    function_calling_llm: Optional[Any] = Field(
         description="Language model that will run the agent.", default=None
     )
     config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
@@ -246,9 +245,15 @@
         if self.output_log_file:
             self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
-        if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
-            self.function_calling_llm = create_llm(self.function_calling_llm)
+        if self.function_calling_llm:
+            if isinstance(self.function_calling_llm, str):
+                self.function_calling_llm = LLM(model=self.function_calling_llm)
+            elif not isinstance(self.function_calling_llm, LLM):
+                self.function_calling_llm = LLM(
+                    model=getattr(self.function_calling_llm, "model_name", None)
+                    or getattr(self.function_calling_llm, "deployment_name", None)
+                    or str(self.function_calling_llm)
+                )
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
         return self
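A standalone sketch of the normalization branch the right-hand side adds; `LLM` here is a stand-in dataclass so the snippet runs on its own, and the helper name is made up:

```python
from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class LLM:  # stand-in for crewai.LLM, only so this sketch is self-contained
    model: str


def normalize_function_calling_llm(value: Any) -> Optional[LLM]:
    """Mirror the branching added in the second hunk above."""
    if not value:
        return None
    if isinstance(value, str):
        return LLM(model=value)
    if isinstance(value, LLM):
        return value
    # Fall back to whatever model identifier the object exposes.
    model = (
        getattr(value, "model_name", None)
        or getattr(value, "deployment_name", None)
        or str(value)
    )
    return LLM(model=model)


print(normalize_function_calling_llm("gpt-4o-mini"))  # LLM(model='gpt-4o-mini')
```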
@@ -1,13 +1,9 @@
 import ast
 import datetime
-import json
-import re
 import time
 from difflib import SequenceMatcher
 from textwrap import dedent
-from typing import Any, Dict, List, Union
+from typing import Any, List, Union

-from json_repair import repair_json
-
 import crewai.utilities.events as events
 from crewai.agents.tools_handler import ToolsHandler
@@ -23,15 +19,7 @@ try:
     import agentops  # type: ignore
 except ImportError:
     agentops = None
-OPENAI_BIGGER_MODELS = [
-    "gpt-4",
-    "gpt-4o",
-    "o1-preview",
-    "o1-mini",
-    "o1",
-    "o3",
-    "o3-mini",
-]
+OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini", "o1", "o3", "o3-mini"]


 class ToolUsageErrorException(Exception):
@@ -92,7 +80,7 @@ class ToolUsage:
         self._max_parsing_attempts = 2
         self._remember_format_after_usages = 4

-    def parse_tool_calling(self, tool_string: str):
+    def parse(self, tool_string: str):
         """Parse the tool string and return the tool calling."""
         return self._tool_calling(tool_string)

@@ -106,6 +94,7 @@ class ToolUsage:
             self.task.increment_tools_errors()
             return error

+        # BUG? The code below seems to be unreachable
         try:
             tool = self._select_tool(calling.tool_name)
         except Exception as e:
@@ -127,7 +116,7 @@ class ToolUsage:
             self._printer.print(content=f"\n\n{error}\n", color="red")
             return error

-        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
+        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"  # type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None)

     def _use(
         self,
@@ -360,13 +349,13 @@ class ToolUsage:
             tool_name = self.action.tool
             tool = self._select_tool(tool_name)
             try:
-                arguments = self._validate_tool_input(self.action.tool_input)
+                tool_input = self._validate_tool_input(self.action.tool_input)
+                arguments = ast.literal_eval(tool_input)
             except Exception:
                 if raise_error:
                     raise
                 else:
-                    return ToolUsageErrorException(
+                    return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
                         f'{self._i18n.errors("tool_arguments_error")}'
                     )

@@ -374,14 +363,14 @@ class ToolUsage:
                 if raise_error:
                     raise
                 else:
-                    return ToolUsageErrorException(
+                    return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
                         f'{self._i18n.errors("tool_arguments_error")}'
                     )

             return ToolCalling(
                 tool_name=tool.name,
                 arguments=arguments,
-                log=tool_string,
+                log=tool_string,  # type: ignore
             )

     def _tool_calling(
@@ -407,28 +396,57 @@ class ToolUsage:
             )
             return self._tool_calling(tool_string)

-    def _validate_tool_input(self, tool_input: str) -> Dict[str, Any]:
+    def _validate_tool_input(self, tool_input: str) -> str:
         try:
-            # Replace Python literals with JSON equivalents
-            replacements = {
-                r"'": '"',
-                r"None": "null",
-                r"True": "true",
-                r"False": "false",
-            }
-            for pattern, replacement in replacements.items():
-                tool_input = re.sub(pattern, replacement, tool_input)
-
-            arguments = json.loads(tool_input)
-        except json.JSONDecodeError:
-            # Attempt to repair JSON string
-            repaired_input = repair_json(tool_input)
-            try:
-                arguments = json.loads(repaired_input)
-            except json.JSONDecodeError as e:
-                raise Exception(f"Invalid tool input JSON: {e}")
-
-        return arguments
+            ast.literal_eval(tool_input)
+            return tool_input
+        except Exception:
+            # Clean and ensure the string is properly enclosed in braces
+            tool_input = tool_input.strip()
+            if not tool_input.startswith("{"):
+                tool_input = "{" + tool_input
+            if not tool_input.endswith("}"):
+                tool_input += "}"
+
+            # Manually split the input into key-value pairs
+            entries = tool_input.strip("{} ").split(",")
+            formatted_entries = []
+
+            for entry in entries:
+                if ":" not in entry:
+                    continue  # Skip malformed entries
+                key, value = entry.split(":", 1)
+
+                # Remove extraneous white spaces and quotes, replace single quotes
+                key = key.strip().strip('"').replace("'", '"')
+                value = value.strip()
+
+                # Handle replacement of single quotes at the start and end of the value string
+                if value.startswith("'") and value.endswith("'"):
+                    value = value[1:-1]  # Remove single quotes
+                    value = (
+                        '"' + value.replace('"', '\\"') + '"'
+                    )  # Re-encapsulate with double quotes
+                elif value.isdigit():  # Check if value is a digit, hence integer
+                    value = value
+                elif value.lower() in [
+                    "true",
+                    "false",
+                ]:  # Check for boolean and null values
+                    value = value.lower().capitalize()
+                elif value.lower() == "null":
+                    value = "None"
+                else:
+                    # Assume the value is a string and needs quotes
+                    value = '"' + value.replace('"', '\\"') + '"'
+
+                # Rebuild the entry with proper quoting
+                formatted_entry = f'"{key}": {value}'
+                formatted_entries.append(formatted_entry)
+
+            # Reconstruct the JSON string
+            new_json_string = "{" + ", ".join(formatted_entries) + "}"
+            return new_json_string

     def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
         event_data = self._prepare_event_data(tool, tool_calling)
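To make the behavioral shift in this hunk concrete, a small standalone comparison of the two parsing strategies; the sample input string is made up, and the exact output of `repair_json` may vary by version:

```python
import ast
import json

from json_repair import repair_json  # dependency used by the removed implementation

raw = "{'query': 'crewai', 'max_results': 5}"  # Python-literal style tool input

# Removed approach: coerce the string into valid JSON, repairing it if necessary.
repaired = repair_json(raw)
print(json.loads(repaired))   # expected: {'query': 'crewai', 'max_results': 5}

# Replacement approach: keep the string and interpret it as a Python literal later,
# as `_use` now does with ast.literal_eval on the validated string.
print(ast.literal_eval(raw))  # {'query': 'crewai', 'max_results': 5}
```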
@@ -9,11 +9,11 @@
     "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
     "memory": "\n\n# Useful context: \n{memory}",
     "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
-    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
-    "no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. When responding, I must use the following format:\n\n```\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
-    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
-    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
+    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
+    "no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
+    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
+    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
+    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
     "expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
     "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
@@ -1,117 +0,0 @@
-interactions:
-- request:
-    body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
-      Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
-      give my best complete final answer to the task respond using the exact following
-      format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
-      answer must be the great and the most complete as possible, it must be outcome
-      described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
-      "content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
-      final answer: Your best answer to your coworker asking you this, accounting
-      for the context shared.\nyou MUST return the actual complete content as the
-      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
-      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
-      "model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
-    headers:
-      accept:
-      - application/json
-      accept-encoding:
-      - gzip, deflate
-      connection:
-      - keep-alive
-      content-length:
-      - '939'
-      content-type:
-      - application/json
-      cookie:
-      - __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
-        _cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000
-      host:
-      - api.openai.com
-      user-agent:
-      - OpenAI/Python 1.52.1
-      x-stainless-arch:
-      - arm64
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - MacOS
-      x-stainless-package-version:
-      - 1.52.1
-      x-stainless-raw-response:
-      - 'true'
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.12.7
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    content: "{\n \"id\": \"chatcmpl-AnuRlxiTxduAVoXHHY58Fvfbll5IS\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1736458417,\n \"model\": \"gpt-4o-2024-08-06\",\n
-      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
-      Answer: This is a test task, and the context or question from the coworker is
-      not specified. Therefore, my best effort would be to affirm my readiness to
-      answer accurately and in detail any question about Futel Football Club based
-      on the context described. If provided with specific information or questions,
-      I will ensure to respond comprehensively as required by my job directives.\",\n
-      \ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
-      \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 177,\n \"completion_tokens\":
-      82,\n \"total_tokens\": 259,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
-      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
-      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
-      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
-      \"fp_703d4ff298\"\n}\n"
-    headers:
-      CF-Cache-Status:
-      - DYNAMIC
-      CF-RAY:
-      - 8ff78bf7bd6cc002-ATL
-      Connection:
-      - keep-alive
-      Content-Encoding:
-      - gzip
-      Content-Type:
-      - application/json
-      Date:
-      - Thu, 09 Jan 2025 21:33:40 GMT
-      Server:
-      - cloudflare
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - nosniff
-      access-control-expose-headers:
-      - X-Request-ID
-      alt-svc:
-      - h3=":443"; ma=86400
-      openai-organization:
-      - crewai-iuxna1
-      openai-processing-ms:
-      - '2263'
-      openai-version:
-      - '2020-10-01'
-      strict-transport-security:
-      - max-age=31536000; includeSubDomains; preload
-      x-ratelimit-limit-requests:
-      - '10000'
-      x-ratelimit-limit-tokens:
-      - '30000000'
-      x-ratelimit-remaining-requests:
-      - '9999'
-      x-ratelimit-remaining-tokens:
-      - '29999786'
-      x-ratelimit-reset-requests:
-      - 6ms
-      x-ratelimit-reset-tokens:
-      - 0s
-      x-request-id:
-      - req_7c1a31da73cd103e9f410f908e59187f
-    http_version: HTTP/1.1
-    status_code: 200
-version: 1
@@ -1,119 +0,0 @@
[Deleted VCR cassette: a POST to https://api.openai.com/v1/chat/completions with the same request body and request headers as the first cassette above. Response body: id "chatcmpl-AnuRrFJZGKw8cIEshvuW1PKwFZFKs", created 1736458423, model "gpt-4o-2024-08-06", finish_reason "stop", usage 177 prompt / 113 completion / 290 total tokens, system_fingerprint "fp_703d4ff298". Response headers match the first cassette except CF-RAY 8ff78c1d0ecdc002-ATL, Date Thu, 09 Jan 2025 21:33:47 GMT, openai-processing-ms '3097', x-request-id req_179e1d56e2b17303e40480baffbc7b08.]
[Recorded assistant reply: "I now can give a great answer \nFinal Answer: Although you mentioned this being a \"Test task\" and haven't provided a specific question regarding Futel Football Club, your request appears to involve ensuring accuracy and detail in responses. For a proper answer about Futel, I'd be ready to provide details about the club's history, management, players, match schedules, and recent performance statistics. Remember to ask specific questions to receive a targeted response. If this were a real context where information was shared, I would respond precisely to what's been asked regarding Futel Football Club."]
@@ -1,114 +0,0 @@
[Deleted VCR cassette: a POST to https://api.openai.com/v1/chat/completions with the same request body and request headers as the first cassette above. Response body: id "chatcmpl-AnuRqgg7eiHnDi2DOqdk99fiqOboz", created 1736458422, model "gpt-4o-2024-08-06", finish_reason "stop", usage 177 prompt / 44 completion / 221 total tokens, system_fingerprint "fp_703d4ff298". Response headers match the first cassette except CF-RAY 8ff78c164ad2c002-ATL, Date Thu, 09 Jan 2025 21:33:43 GMT, openai-processing-ms '899', x-request-id req_9f5226208edb90a27987aaf7e0ca03d3.]
[Recorded assistant reply: "I now can give a great answer \nFinal Answer: Your best answer to your coworker asking you this, accounting for the context shared. You MUST return the actual complete content as the final answer, not a summary."]
@@ -1,119 +0,0 @@
[Deleted VCR cassette: a POST to https://api.openai.com/v1/chat/completions with the same request body as the first cassette above but without the cookie request header. Response body: id "chatcmpl-AnuRjmwH5mrykLxQhFwTqqTiDtuTf", created 1736458415, model "gpt-4o-2024-08-06", finish_reason "stop", usage 177 prompt / 79 completion / 256 total tokens, system_fingerprint "fp_703d4ff298". Response headers match the first cassette except CF-RAY 8ff78be5eebfc002-ATL, Date Thu, 09 Jan 2025 21:33:37 GMT, openai-processing-ms '2730', x-request-id req_014478ba748f860d10ac250ca0ba824a, plus Set-Cookie headers issuing the __cf_bm cookie (path=/; expires=Thu, 09-Jan-25 22:03:37 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None) and the _cfuvid cookie (path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None) replayed by the other cassettes.]
[Recorded assistant reply: "I now can give a great answer \nFinal Answer: As this is a test task, please note that Futel Football Club is fictional and any specific details about it would not be available. However, if you have specific questions or need information about a particular aspect of Futel or any general football club inquiry, feel free to ask, and I'll do my best to assist you with your query!"]
@@ -1,119 +0,0 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
      Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
      give my best complete final answer to the task respond using the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
      answer must be the great and the most complete as possible, it must be outcome
      described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
      "content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
      final answer: Your best answer to your coworker asking you this, accounting
      for the context shared.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
      "model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '939'
      content-type:
      - application/json
      cookie:
      - __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
        _cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AnuRofLgmzWcDya5LILqYwIJYgFoq\",\n \"object\":
      \"chat.completion\",\n \"created\": 1736458420,\n \"model\": \"gpt-4o-2024-08-06\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
      Answer: As an official Futel Football Club infopoint, my responsibility is to
      provide detailed and accurate information about the club. This includes answering
      questions regarding team statistics, player performances, upcoming fixtures,
      ticketing and fan zone details, club history, and community initiatives. Our
      focus is to ensure that fans and stakeholders have access to the latest and
      most precise information about the club's on and off-pitch activities. If there's
      anything specific you need to know, just let me know, and I'll be more than
      happy to assist!\",\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      177,\n \"completion_tokens\": 115,\n \"total_tokens\": 292,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_703d4ff298\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ff78c066f37c002-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 09 Jan 2025 21:33:42 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '2459'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999786'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_a146dd27f040f39a576750970cca0f52
    http_version: HTTP/1.1
    status_code: 200
version: 1
File diff suppressed because it is too large
@@ -1464,35 +1464,39 @@ def test_dont_set_agents_step_callback_if_already_set():

 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_crew_function_calling_llm():
-    from crewai import LLM
+    from unittest.mock import patch
+
     from crewai.tools import tool

-    llm = LLM(model="gpt-4o-mini")
+    llm = "gpt-4o"

     @tool
-    def look_up_greeting() -> str:
-        """Tool used to retrieve a greeting."""
-        return "Howdy!"
+    def learn_about_AI() -> str:
+        """Useful for when you need to learn about AI to write an paragraph about it."""
+        return "AI is a very broad field."

     agent1 = Agent(
-        role="Greeter",
-        goal="Say hello.",
-        backstory="You are a friendly greeter.",
-        tools=[look_up_greeting],
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        tools=[learn_about_AI],
         llm="gpt-4o-mini",
         function_calling_llm=llm,
     )

     essay = Task(
-        description="Look up the greeting and say it.",
-        expected_output="A greeting.",
+        description="Write and then review an small paragraph on AI until it's AMAZING",
+        expected_output="The final paragraph.",
         agent=agent1,
     )
+    tasks = [essay]
+    crew = Crew(agents=[agent1], tasks=tasks)

-    crew = Crew(agents=[agent1], tasks=[essay])
-    result = crew.kickoff()
-    assert result.raw == "Howdy!"
+    with patch.object(
+        instructor, "from_litellm", wraps=instructor.from_litellm
+    ) as mock_from_litellm:
+        crew.kickoff()
+        mock_from_litellm.assert_called()


 @pytest.mark.vcr(filter_headers=["authorization"])
289 tests/e2e_crew_tests.py Normal file
@@ -0,0 +1,289 @@
import asyncio
import os
import tempfile

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask


def test_basic_crew_execution(default_agent):
    """Test basic crew execution using the default agent fixture."""

    # Initialize agents by copying the default agent fixture
    researcher = default_agent.copy()
    researcher.role = "Researcher"
    researcher.goal = "Research the latest advancements in AI."
    researcher.backstory = "An expert in AI technologies."

    writer = default_agent.copy()
    writer.role = "Writer"
    writer.goal = "Write an article based on research findings."
    writer.backstory = "A professional writer specializing in technology topics."

    # Define tasks
    research_task = Task(
        description="Provide a summary of the latest advancements in AI.",
        expected_output="A detailed summary of recent AI advancements.",
        agent=researcher,
    )

    writing_task = Task(
        description="Write an article based on the research summary.",
        expected_output="An engaging article on AI advancements.",
        agent=writer,
    )

    # Create the crew
    crew = Crew(
        agents=[researcher, writer],
        tasks=[research_task, writing_task],
        process=Process.sequential,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "AI advancements" in result.raw
        or "artificial intelligence" in result.raw.lower()
    ), "Result does not contain expected content."


def test_hierarchical_crew_with_manager(default_llm_config):
    """Test hierarchical crew execution with a manager agent."""

    # Initialize agents using the default LLM config fixture
    ceo = Agent(
        role="CEO",
        goal="Oversee the project and ensure quality deliverables.",
        backstory="A seasoned executive with a keen eye for detail.",
        llm=default_llm_config,
    )

    developer = Agent(
        role="Developer",
        goal="Implement software features as per requirements.",
        backstory="An experienced software developer.",
        llm=default_llm_config,
    )

    tester = Agent(
        role="Tester",
        goal="Test software features and report bugs.",
        backstory="A meticulous QA engineer.",
        llm=default_llm_config,
    )

    # Define tasks
    development_task = Task(
        description="Develop the new authentication feature.",
        expected_output="Code implementation of the authentication feature.",
        agent=developer,
    )

    testing_task = Task(
        description="Test the authentication feature for vulnerabilities.",
        expected_output="A report on any found bugs or vulnerabilities.",
        agent=tester,
    )

    # Create the crew with hierarchical process
    crew = Crew(
        agents=[ceo, developer, tester],
        tasks=[development_task, testing_task],
        process=Process.hierarchical,
        manager_agent=ceo,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "authentication" in result.raw.lower()
    ), "Result does not contain expected content."


@pytest.mark.asyncio
async def test_asynchronous_task_execution(default_llm_config):
    """Test crew execution with asynchronous tasks."""

    # Initialize agent
    data_processor = Agent(
        role="Data Processor",
        goal="Process large datasets efficiently.",
        backstory="An expert in data processing and analysis.",
        llm=default_llm_config,
    )

    # Define tasks with async_execution=True
    async_task1 = Task(
        description="Process dataset A asynchronously.",
        expected_output="Processed results of dataset A.",
        agent=data_processor,
        async_execution=True,
    )

    async_task2 = Task(
        description="Process dataset B asynchronously.",
        expected_output="Processed results of dataset B.",
        agent=data_processor,
        async_execution=True,
    )

    # Create the crew
    crew = Crew(
        agents=[data_processor],
        tasks=[async_task1, async_task2],
        process=Process.sequential,
    )

    # Execute the crew asynchronously
    result = await crew.kickoff_async()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "dataset a" in result.raw.lower() or "dataset b" in result.raw.lower()
    ), "Result does not contain expected content."


def test_crew_with_conditional_task(default_llm_config):
    """Test crew execution that includes a conditional task."""

    # Initialize agents
    analyst = Agent(
        role="Analyst",
        goal="Analyze data and make decisions based on insights.",
        backstory="A data analyst with experience in predictive modeling.",
        llm=default_llm_config,
    )

    decision_maker = Agent(
        role="Decision Maker",
        goal="Make decisions based on analysis.",
        backstory="An executive responsible for strategic decisions.",
        llm=default_llm_config,
    )

    # Define tasks
    analysis_task = Task(
        description="Analyze the quarterly financial data.",
        expected_output="A report highlighting key financial insights.",
        agent=analyst,
    )

    decision_task = ConditionalTask(
        description="If the profit margin is below 10%, recommend cost-cutting measures.",
        expected_output="Recommendations for reducing costs.",
        agent=decision_maker,
        condition=lambda output: "profit margin below 10%" in output.lower(),
    )

    # Create the crew
    crew = Crew(
        agents=[analyst, decision_maker],
        tasks=[analysis_task, decision_task],
        process=Process.sequential,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert len(result.tasks_output) >= 1, "No tasks were executed."


def test_crew_with_output_file():
    """Test crew execution that writes output to a file."""

    # Access the API key from environment variables
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    assert openai_api_key, "OPENAI_API_KEY environment variable is not set."

    # Create a temporary directory for output files
    with tempfile.TemporaryDirectory() as tmpdirname:

        # Initialize agent
        content_creator = Agent(
            role="Content Creator",
            goal="Generate engaging blog content.",
            backstory="A creative writer with a passion for storytelling.",
            llm={"provider": "openai", "model": "gpt-4", "api_key": openai_api_key},
        )

        # Define task with output file
        output_file_path = f"{tmpdirname}/blog_post.txt"
        blog_task = Task(
            description="Write a blog post about the benefits of remote work.",
            expected_output="An informative and engaging blog post.",
            agent=content_creator,
            output_file=output_file_path,
        )

        # Create the crew
        crew = Crew(
            agents=[content_creator],
            tasks=[blog_task],
            process=Process.sequential,
        )

        # Execute the crew
        crew.kickoff()

        # Assertions to verify the result
        assert os.path.exists(output_file_path), "Output file was not created."

        # Read the content from the file and perform assertions
        with open(output_file_path, "r") as file:
            content = file.read()
            assert (
                "remote work" in content.lower()
            ), "Output file does not contain expected content."


def test_invalid_hierarchical_process():
    """Test that an error is raised when using hierarchical process without a manager agent or manager_llm."""
    with pytest.raises(ValueError) as exc_info:
        Crew(
            agents=[],
            tasks=[],
            process=Process.hierarchical,  # Hierarchical process without a manager
        )
    assert "manager_llm or manager_agent is required" in str(exc_info.value)


def test_crew_with_memory(memory_agent, memory_tasks):
    """Test crew execution utilizing memory."""

    # Enable memory in the crew
    crew = Crew(
        agents=[memory_agent],
        tasks=memory_tasks,
        process=Process.sequential,
        memory=True,  # Enable memory
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "history of ai" in result.raw.lower() and "future of ai" in result.raw.lower()
    ), "Result does not contain expected content."
@@ -1,103 +0,0 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Test Role. Test Backstory\nYour
      personal goal is: Test Goal\nTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: Return: Test output\n\nThis
      is the expect criteria for your final answer: Test output\nyou MUST return the
      actual complete content as the final answer, not a summary.\n\nBegin! This is
      VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}], "model": "gpt-4o"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '776'
      content-type:
      - application/json
      cookie:
      - __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.47.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.47.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AB7fr4aPstiFUArxwxTVdfJSFwxsC\",\n \"object\":
      \"chat.completion\",\n \"created\": 1727214471,\n \"model\": \"gpt-4o-2024-05-13\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
      Answer: Test output\",\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      155,\n \"completion_tokens\": 15,\n \"total_tokens\": 170,\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8c85f9a91e311cf3-GRU
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Tue, 24 Sep 2024 21:47:51 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '216'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999817'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_88b1376917b345c976fdb03a55f7b6c1
    http_version: HTTP/1.1
    status_code: 200
version: 1
@@ -1,6 +1,8 @@
+from unittest.mock import MagicMock
+
 import pytest

-from crewai import Agent
+from crewai import Agent, Task
 from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool


@@ -20,9 +22,12 @@ class InternalAgentTool(BaseAgentTool):
         ("Futel Official Infopoint\n", True),  # trailing newline
         ('"Futel Official Infopoint"', True),  # embedded quotes
         (" FUTEL\nOFFICIAL INFOPOINT ", True),  # multiple whitespace and newline
+        ("futel official infopoint", True),  # lowercase
+        ("FUTEL OFFICIAL INFOPOINT", True),  # uppercase
+        ("Non Existent Agent", False),  # non-existent agent
+        (None, False),  # None agent name
     ],
 )
-@pytest.mark.vcr(filter_headers=["authorization"])
 def test_agent_tool_role_matching(role_name, should_match):
     """Test that agent tools can match roles regardless of case, whitespace, and special characters."""
     # Create test agent
@@ -121,113 +121,3 @@ def test_tool_usage_render():
         "Tool Name: Random Number Generator\nTool Arguments: {'min_value': {'description': 'The minimum value of the range (inclusive)', 'type': 'int'}, 'max_value': {'description': 'The maximum value of the range (inclusive)', 'type': 'int'}}\nTool Description: Generates a random number within a specified range"
         in rendered
     )
-
-
-def test_validate_tool_input_booleans_and_none():
-    # Create a ToolUsage instance with mocks
-    tool_usage = ToolUsage(
-        tools_handler=MagicMock(),
-        tools=[],
-        original_tools=[],
-        tools_description="",
-        tools_names="",
-        task=MagicMock(),
-        function_calling_llm=MagicMock(),
-        agent=MagicMock(),
-        action=MagicMock(),
-    )
-
-    # Input with booleans and None
-    tool_input = '{"key1": True, "key2": False, "key3": None}'
-    expected_arguments = {"key1": True, "key2": False, "key3": None}
-
-    arguments = tool_usage._validate_tool_input(tool_input)
-    assert arguments == expected_arguments
-
-
-def test_validate_tool_input_mixed_types():
-    # Create a ToolUsage instance with mocks
-    tool_usage = ToolUsage(
-        tools_handler=MagicMock(),
-        tools=[],
-        original_tools=[],
-        tools_description="",
-        tools_names="",
-        task=MagicMock(),
-        function_calling_llm=MagicMock(),
-        agent=MagicMock(),
-        action=MagicMock(),
-    )
-
-    # Input with mixed types
-    tool_input = '{"number": 123, "text": "Some text", "flag": True}'
-    expected_arguments = {"number": 123, "text": "Some text", "flag": True}
-
-    arguments = tool_usage._validate_tool_input(tool_input)
-    assert arguments == expected_arguments
-
-
-def test_validate_tool_input_single_quotes():
-    # Create a ToolUsage instance with mocks
-    tool_usage = ToolUsage(
-        tools_handler=MagicMock(),
-        tools=[],
-        original_tools=[],
-        tools_description="",
-        tools_names="",
-        task=MagicMock(),
-        function_calling_llm=MagicMock(),
-        agent=MagicMock(),
-        action=MagicMock(),
-    )
-
-    # Input with single quotes instead of double quotes
-    tool_input = "{'key': 'value', 'flag': True}"
-    expected_arguments = {"key": "value", "flag": True}
-
-    arguments = tool_usage._validate_tool_input(tool_input)
-    assert arguments == expected_arguments
-
-
-def test_validate_tool_input_invalid_json_repairable():
-    # Create a ToolUsage instance with mocks
-    tool_usage = ToolUsage(
-        tools_handler=MagicMock(),
-        tools=[],
-        original_tools=[],
-        tools_description="",
-        tools_names="",
-        task=MagicMock(),
-        function_calling_llm=MagicMock(),
-        agent=MagicMock(),
-        action=MagicMock(),
-    )
-
-    # Invalid JSON input that can be repaired
-    tool_input = '{"key": "value", "list": [1, 2, 3,]}'
-    expected_arguments = {"key": "value", "list": [1, 2, 3]}
-
-    arguments = tool_usage._validate_tool_input(tool_input)
-    assert arguments == expected_arguments
-
-
-def test_validate_tool_input_with_special_characters():
-    # Create a ToolUsage instance with mocks
-    tool_usage = ToolUsage(
-        tools_handler=MagicMock(),
-        tools=[],
-        original_tools=[],
-        tools_description="",
-        tools_names="",
-        task=MagicMock(),
-        function_calling_llm=MagicMock(),
-        agent=MagicMock(),
-        action=MagicMock(),
-    )
-
-    # Input with special characters
-    tool_input = '{"message": "Hello, world! \u263A", "valid": True}'
-    expected_arguments = {"message": "Hello, world! ☺", "valid": True}
-
-    arguments = tool_usage._validate_tool_input(tool_input)
-    assert arguments == expected_arguments