Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 00:28:31 +00:00)

Merge branch 'main' of github.com:crewAIInc/crewAI into fix/clone_when_using_knowledge
@@ -12,7 +12,7 @@ The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you

To use the CrewAI CLI, make sure you have CrewAI installed:

-```shell
+```shell Terminal
pip install crewai
```

@@ -20,7 +20,7 @@ pip install crewai

The basic structure of a CrewAI CLI command is:

-```shell
+```shell Terminal
crewai [COMMAND] [OPTIONS] [ARGUMENTS]
```

@@ -30,7 +30,7 @@ crewai [COMMAND] [OPTIONS] [ARGUMENTS]

Create a new crew or flow.

-```shell
+```shell Terminal
crewai create [OPTIONS] TYPE NAME
```

@@ -38,7 +38,7 @@ crewai create [OPTIONS] TYPE NAME
- `NAME`: Name of the crew or flow

Example:
-```shell
+```shell Terminal
crewai create crew my_new_crew
crewai create flow my_new_flow
```
@@ -47,14 +47,14 @@ crewai create flow my_new_flow

Show the installed version of CrewAI.

-```shell
+```shell Terminal
crewai version [OPTIONS]
```

- `--tools`: (Optional) Show the installed version of CrewAI tools

Example:
-```shell
+```shell Terminal
crewai version
crewai version --tools
```
@@ -63,7 +63,7 @@ crewai version --tools

Train the crew for a specified number of iterations.

-```shell
+```shell Terminal
crewai train [OPTIONS]
```

@@ -71,7 +71,7 @@ crewai train [OPTIONS]
- `-f, --filename TEXT`: Path to a custom file for training (default: "trained_agents_data.pkl")

Example:
-```shell
+```shell Terminal
crewai train -n 10 -f my_training_data.pkl
```

@@ -79,14 +79,14 @@ crewai train -n 10 -f my_training_data.pkl

Replay the crew execution from a specific task.

-```shell
+```shell Terminal
crewai replay [OPTIONS]
```

- `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks

Example:
-```shell
+```shell Terminal
crewai replay -t task_123456
```

@@ -94,7 +94,7 @@ crewai replay -t task_123456

Retrieve your latest crew.kickoff() task outputs.

-```shell
+```shell Terminal
crewai log-tasks-outputs
```

@@ -102,7 +102,7 @@ crewai log-tasks-outputs

Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs).

-```shell
+```shell Terminal
crewai reset-memories [OPTIONS]
```

@@ -113,7 +113,7 @@ crewai reset-memories [OPTIONS]
- `-a, --all`: Reset ALL memories

Example:
-```shell
+```shell Terminal
crewai reset-memories --long --short
crewai reset-memories --all
```
@@ -122,7 +122,7 @@ crewai reset-memories --all

Test the crew and evaluate the results.

-```shell
+```shell Terminal
crewai test [OPTIONS]
```

@@ -130,7 +130,7 @@ crewai test [OPTIONS]
- `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")

Example:
-```shell
+```shell Terminal
crewai test -n 5 -m gpt-3.5-turbo
```

@@ -138,7 +138,7 @@ crewai test -n 5 -m gpt-3.5-turbo

Run the crew.

-```shell
+```shell Terminal
crewai run
```
<Note>
@@ -147,7 +147,36 @@ Some commands may require additional configuration or setup within your project
</Note>

-### 9. API Keys
+### 9. Chat
+
+Starting in version `0.98.0`, when you run the `crewai chat` command, you start an interactive session with your crew. The AI assistant will guide you by asking for necessary inputs to execute the crew. Once all inputs are provided, the crew will execute its tasks.
+
+After receiving the results, you can continue interacting with the assistant for further instructions or questions.
+
+```shell Terminal
+crewai chat
+```
+<Note>
+Ensure you execute these commands from your CrewAI project's root directory.
+</Note>
+<Note>
+IMPORTANT: Set the `chat_llm` property in your `crew.py` file to enable this command.
+
+```python
+@crew
+def crew(self) -> Crew:
+    return Crew(
+        agents=self.agents,
+        tasks=self.tasks,
+        process=Process.sequential,
+        verbose=True,
+        chat_llm="gpt-4o",  # LLM for chat orchestration
+    )
+```
+</Note>
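A further hedged note, not taken from this diff: `chat_llm` appears to be resolved through the same LLM factory used elsewhere in the library, so it may also accept a configured `LLM` instance when you need more control over the chat model. A minimal sketch under that assumption:

```python Code
from crewai import LLM

# Assumption: chat_llm accepts an LLM object as well as a model-name string.
chat_model = LLM(model="gpt-4o", temperature=0.2)
# Inside the @crew method: return Crew(..., chat_llm=chat_model)
```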
+### 10. API Keys

When running ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
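As a hedged illustration (not part of the diff), the key for the provider you select typically ends up in the generated project's `.env` file and is read from the environment at runtime; variable names differ by provider. A minimal sketch, assuming OpenAI was chosen:

```python Code
import os

# Assumption: OPENAI_API_KEY is the variable the generated project expects for
# the OpenAI provider; other providers use their own variable names.
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("Provider API key not set; add it to .env or export it before running the crew.")
```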
@@ -243,6 +243,9 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
# llm: bedrock/amazon.titan-text-express-v1
# llm: bedrock/meta.llama2-70b-chat-v1

+# Amazon SageMaker Models - Enterprise-grade
+# llm: sagemaker/<my-endpoint>
+
# Mistral Models - Open source alternative
# llm: mistral/mistral-large-latest
# llm: mistral/mistral-medium-latest
@@ -506,6 +509,21 @@ Learn how to get the most out of your LLM configuration:
)
```
</Accordion>

+<Accordion title="Amazon SageMaker">
+```python Code
+AWS_ACCESS_KEY_ID=<your-access-key>
+AWS_SECRET_ACCESS_KEY=<your-secret-key>
+AWS_DEFAULT_REGION=<your-region>
+```
+
+Example usage:
+```python Code
+llm = LLM(
+    model="sagemaker/<my-endpoint>"
+)
+```
+</Accordion>
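For orientation (an illustrative sketch, not taken from the diff), the SageMaker-backed `LLM` plugs into an agent like any other model; `<my-endpoint>` remains a placeholder for your endpoint name:

```python Code
from crewai import Agent, LLM

llm = LLM(model="sagemaker/<my-endpoint>")  # placeholder endpoint name

researcher = Agent(
    role="Researcher",
    goal="Summarize the latest findings",
    backstory="A careful analyst.",
    llm=llm,
)
```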
<Accordion title="Mistral">
```python Code
@@ -15,10 +15,48 @@ icon: wrench
If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
</Note>

+# Setting Up Your Environment
+
+Before installing CrewAI, it's recommended to set up a virtual environment. This helps isolate your project dependencies and avoid conflicts.
+
+<Steps>
+<Step title="Create a Virtual Environment">
+Choose your preferred method to create a virtual environment:
+
+**Using venv (Python's built-in tool):**
+```shell Terminal
+python3 -m venv .venv
+```
+
+**Using conda:**
+```shell Terminal
+conda create -n crewai-env python=3.12
+```
+</Step>
+
+<Step title="Activate the Virtual Environment">
+Activate your virtual environment based on your platform:
+
+**On macOS/Linux (venv):**
+```shell Terminal
+source .venv/bin/activate
+```
+
+**On Windows (venv):**
+```shell Terminal
+.venv\Scripts\activate
+```
+
+**Using conda (all platforms):**
+```shell Terminal
+conda activate crewai-env
+```
+</Step>
+</Steps>
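As a quick sanity check (a sketch, not part of the installation docs), you can confirm that the interpreter inside the activated environment is the one CrewAI will use; the supported Python range is the one stated in the project's `pyproject.toml`, so treat the bounds below as an assumption:

```python Code
import sys

print(sys.executable)  # should point inside .venv or the conda env
# Assumption: CrewAI of this era targets Python >= 3.10 and < 3.13.
assert (3, 10) <= sys.version_info[:2] < (3, 13), "Unsupported Python version for CrewAI"
```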
# Installing CrewAI

CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
-Let's get you set up! 🚀
+Now let's get you set up! 🚀

<Steps>
<Step title="Install CrewAI">
@@ -72,9 +110,9 @@ Let's get you set up! 🚀

# Creating a New Project

-<Info>
+<Tip>
We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
-</Info>
+</Tip>

<Steps>
<Step title="Generate Project Structure">
@@ -104,7 +142,18 @@ Let's get you set up! 🚀
└── tasks.yaml
```
</Frame>
</Step>

+<Step title="Install Additional Tools">
+You can install additional tools using UV:
+```shell Terminal
+uv add <tool-name>
+```
+
+<Tip>
+UV is our preferred package manager as it's significantly faster than pip and provides better dependency resolution.
+</Tip>
+</Step>

<Step title="Customize Your Project">
Your project will contain these essential files:

@@ -278,7 +278,7 @@ email_summarizer:
    Summarize emails into a concise and clear summary
  backstory: >
    You will create a 5 bullet point summary of the report
-  llm: mixtal_llm
+  llm: openai/gpt-4o
```
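For orientation (a minimal sketch assuming the standard generated project layout), the `email_summarizer` entry above is wired into `crew.py` through the `@agent` decorator and the `agents_config` mapping provided by `CrewBase`:

```python Code
from crewai import Agent, Crew, Process
from crewai.project import CrewBase, agent, crew

@CrewBase
class EmailCrew:
    """Sketch only; assumes config/agents.yaml defines email_summarizer."""

    @agent
    def email_summarizer(self) -> Agent:
        # The key must match the entry name in agents.yaml.
        return Agent(config=self.agents_config["email_summarizer"], verbose=True)

    @crew
    def crew(self) -> Crew:
        return Crew(agents=self.agents, tasks=self.tasks, process=Process.sequential)
```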
<Tip>
@@ -1,78 +1,118 @@
---
title: Composio Tool
-description: The `ComposioTool` is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
+description: Composio provides 250+ production-ready tools for AI agents with flexible authentication management.
icon: gear-code
---

-# `ComposioTool`
+# `ComposioToolSet`

## Description
+Composio is an integration platform that allows you to connect your AI agents to 250+ tools. Key features include:

-This tools is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
+- **Enterprise-Grade Authentication**: Built-in support for OAuth, API Keys, JWT with automatic token refresh
+- **Full Observability**: Detailed tool usage logs, execution timestamps, and more

## Installation

-To incorporate this tool into your project, follow the installation instructions below:
+To incorporate Composio tools into your project, follow the instructions below:

```shell
-pip install composio-core
-pip install 'crewai[tools]'
+pip install composio-crewai
+pip install crewai
```

-after the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`.
+After the installation is complete, either run `composio login` or export your composio API key as `COMPOSIO_API_KEY`. Get your Composio API key from [here](https://app.composio.dev)
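If you prefer to set the key from code rather than the shell (a sketch; the variable name comes from the paragraph above), export it before constructing the toolset:

```python Code
import os

# Equivalent to `export COMPOSIO_API_KEY=...`; set this before creating
# ComposioToolSet so the SDK can authenticate.
os.environ["COMPOSIO_API_KEY"] = "your-composio-api-key"
```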
## Example

The following example demonstrates how to initialize the tool and execute a github action:

-1. Initialize Composio tools
+1. Initialize Composio toolset

```python Code
-from composio import App
-from crewai_tools import ComposioTool
-from crewai import Agent, Task
+from composio_crewai import ComposioToolSet, App, Action
+from crewai import Agent, Task, Crew

-tools = [ComposioTool.from_action(action=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)]
+toolset = ComposioToolSet()
```

-If you don't know what action you want to use, use `from_app` and `tags` filter to get relevant actions
+2. Connect your GitHub account
+<CodeGroup>
+```shell CLI
+composio add github
+```
```python Code
-tools = ComposioTool.from_app(App.GITHUB, tags=["important"])
+request = toolset.initiate_connection(app=App.GITHUB)
+print(f"Open this URL to authenticate: {request.redirectUrl}")
```
+</CodeGroup>

-or use `use_case` to search relevant actions
+3. Get Tools
+
+- Retrieving all the tools from an app (not recommended for production):
```python Code
-tools = ComposioTool.from_app(App.GITHUB, use_case="Star a github repository")
+tools = toolset.get_tools(apps=[App.GITHUB])
```

-2. Define agent
+- Filtering tools based on tags:
+```python Code
+tag = "users"
+
+filtered_action_enums = toolset.find_actions_by_tags(
+    App.GITHUB,
+    tags=[tag],
+)
+
+tools = toolset.get_tools(actions=filtered_action_enums)
+```
+
+- Filtering tools based on use case:
+```python Code
+use_case = "Star a repository on GitHub"
+
+filtered_action_enums = toolset.find_actions_by_use_case(
+    App.GITHUB, use_case=use_case, advanced=False
+)
+
+tools = toolset.get_tools(actions=filtered_action_enums)
+```
+<Tip>Set `advanced` to True to get actions for complex use cases</Tip>
+
+- Using specific tools:
+
+In this demo, we will use the `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` action from the GitHub app.
+```python Code
+tools = toolset.get_tools(
+    actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER]
+)
+```
+Learn more about filtering actions [here](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions)

+4. Define agent

```python Code
crewai_agent = Agent(
-    role="Github Agent",
-    goal="You take action on Github using Github APIs",
-    backstory=(
-        "You are AI agent that is responsible for taking actions on Github "
-        "on users behalf. You need to take action on Github using Github APIs"
-    ),
+    role="GitHub Agent",
+    goal="You take action on GitHub using GitHub APIs",
+    backstory="You are AI agent that is responsible for taking actions on GitHub on behalf of users using GitHub APIs",
    verbose=True,
    tools=tools,
    llm= # pass an llm
)
```

-3. Execute task
+5. Execute task

```python Code
task = Task(
-    description="Star a repo ComposioHQ/composio on GitHub",
+    description="Star a repo composiohq/composio on GitHub",
    agent=crewai_agent,
-    expected_output="if the star happened",
+    expected_output="Status of the operation",
)

-task.execute()
+crew = Crew(agents=[crewai_agent], tasks=[task])
+
+crew.kickoff()
```

* More detailed list of tools can be found [here](https://app.composio.dev)
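As a small follow-up to the example above (a sketch that continues from the `crew` variable defined there), the return value of `kickoff()` can be inspected directly; only the `raw` text field is assumed here:

```python Code
result = crew.kickoff()
print(result.raw)  # final textual output of the crew run
```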
@@ -3,6 +3,7 @@ import shutil
|
||||
import subprocess
|
||||
from typing import Any, Dict, List, Literal, Optional, Union
|
||||
|
||||
from litellm import AuthenticationError as LiteLLMAuthenticationError
|
||||
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||
|
||||
from crewai.agents import CacheHandler
|
||||
@@ -258,6 +259,9 @@ class Agent(BaseAgent):
|
||||
}
|
||||
)["output"]
|
||||
except Exception as e:
|
||||
if isinstance(e, LiteLLMAuthenticationError):
|
||||
# Do not retry on authentication errors
|
||||
raise e
|
||||
self._times_executed += 1
|
||||
if self._times_executed > self.max_retry_limit:
|
||||
raise e
|
||||
|
||||
@@ -3,6 +3,8 @@ import re
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Callable, Dict, List, Optional, Union
|
||||
|
||||
from litellm.exceptions import AuthenticationError as LiteLLMAuthenticationError
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
from crewai.agents.parser import (
|
||||
@@ -13,6 +15,7 @@ from crewai.agents.parser import (
|
||||
OutputParserException,
|
||||
)
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.llm import LLM
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
||||
from crewai.utilities import I18N, Printer
|
||||
@@ -54,7 +57,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
callbacks: List[Any] = [],
|
||||
):
|
||||
self._i18n: I18N = I18N()
|
||||
self.llm = llm
|
||||
self.llm: LLM = llm
|
||||
self.task = task
|
||||
self.agent = agent
|
||||
self.crew = crew
|
||||
@@ -80,10 +83,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
|
||||
tool.name: tool for tool in self.tools
|
||||
}
|
||||
if self.llm.stop:
|
||||
self.llm.stop = list(set(self.llm.stop + self.stop))
|
||||
else:
|
||||
self.llm.stop = self.stop
|
||||
self.stop = stop_words
|
||||
self.llm.stop = list(set(self.llm.stop + self.stop))
|
||||
|
||||
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
|
||||
if "system" in self.prompt:
|
||||
@@ -98,7 +99,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self._show_start_logs()
|
||||
|
||||
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
|
||||
formatted_answer = self._invoke_loop()
|
||||
|
||||
try:
|
||||
formatted_answer = self._invoke_loop()
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
if self.ask_for_human_input:
|
||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||
@@ -124,7 +129,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self._enforce_rpm_limit()
|
||||
|
||||
answer = self._get_llm_response()
|
||||
|
||||
formatted_answer = self._process_llm_response(answer)
|
||||
|
||||
if isinstance(formatted_answer, AgentAction):
|
||||
@@ -145,10 +149,26 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if self._is_context_length_exceeded(e):
|
||||
self._handle_context_length()
|
||||
continue
|
||||
else:
|
||||
self._handle_unknown_error(e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
def _handle_unknown_error(self, exception: Exception) -> None:
|
||||
"""Handle unknown errors by informing the user."""
|
||||
self._printer.print(
|
||||
content="An unknown error occurred. Please check the details below.",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"Error details: {exception}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def _has_reached_max_iterations(self) -> bool:
|
||||
"""Check if the maximum number of iterations has been reached."""
|
||||
return self.iterations >= self.max_iter
|
||||
@@ -160,10 +180,17 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
|
||||
def _get_llm_response(self) -> str:
|
||||
"""Call the LLM and return the response, handling any invalid responses."""
|
||||
answer = self.llm.call(
|
||||
self.messages,
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
try:
|
||||
answer = self.llm.call(
|
||||
self.messages,
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
except Exception as e:
|
||||
self._printer.print(
|
||||
content=f"Error during LLM call: {e}",
|
||||
color="red",
|
||||
)
|
||||
raise e
|
||||
|
||||
if not answer:
|
||||
self._printer.print(
|
||||
@@ -184,7 +211,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
|
||||
answer = answer.split("Observation:")[0].strip()
|
||||
|
||||
self.iterations += 1
|
||||
return self._format_answer(answer)
|
||||
|
||||
def _handle_agent_action(
|
||||
|
||||
@@ -38,7 +38,6 @@ from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.tools.base_tool import Tool
|
||||
from crewai.types.crew_chat import ChatInputs
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||
from crewai.utilities.constants import TRAINING_DATA_FILE
|
||||
@@ -85,6 +84,7 @@ class Crew(BaseModel):
|
||||
step_callback: Callback to be executed after each step for every agents execution.
|
||||
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
|
||||
planning: Plan the crew execution and add the plan to the crew.
|
||||
chat_llm: The language model used for orchestrating chat interactions with the crew.
|
||||
"""
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
|
||||
@@ -447,14 +447,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
def __init__(
|
||||
self,
|
||||
persistence: Optional[FlowPersistence] = None,
|
||||
restore_uuid: Optional[str] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize a new Flow instance.
|
||||
|
||||
Args:
|
||||
persistence: Optional persistence backend for storing flow states
|
||||
restore_uuid: Optional UUID to restore state from persistence
|
||||
**kwargs: Additional state values to initialize or override
|
||||
"""
|
||||
# Initialize basic instance attributes
|
||||
@@ -464,64 +462,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
self._method_outputs: List[Any] = [] # List to store all method outputs
|
||||
self._persistence: Optional[FlowPersistence] = persistence
|
||||
|
||||
# Validate state model before initialization
|
||||
if isinstance(self.initial_state, type):
|
||||
if issubclass(self.initial_state, BaseModel) and not issubclass(
|
||||
self.initial_state, FlowState
|
||||
):
|
||||
# Check if model has id field
|
||||
model_fields = getattr(self.initial_state, "model_fields", None)
|
||||
if not model_fields or "id" not in model_fields:
|
||||
raise ValueError("Flow state model must have an 'id' field")
|
||||
# Initialize state with initial values
|
||||
self._state = self._create_initial_state()
|
||||
|
||||
# Handle persistence and potential ID conflicts
|
||||
stored_state = None
|
||||
if self._persistence is not None:
|
||||
if (
|
||||
restore_uuid
|
||||
and kwargs
|
||||
and "id" in kwargs
|
||||
and restore_uuid != kwargs["id"]
|
||||
):
|
||||
raise ValueError(
|
||||
f"Conflicting IDs provided: restore_uuid='{restore_uuid}' "
|
||||
f"vs kwargs['id']='{kwargs['id']}'. Use only one ID for restoration."
|
||||
)
|
||||
|
||||
# Attempt to load state, prioritizing restore_uuid
|
||||
if restore_uuid:
|
||||
self._log_flow_event(f"Loading flow state from memory for UUID: {restore_uuid}", color="bold_yellow")
|
||||
stored_state = self._persistence.load_state(restore_uuid)
|
||||
if not stored_state:
|
||||
raise ValueError(
|
||||
f"No state found for restore_uuid='{restore_uuid}'"
|
||||
)
|
||||
elif kwargs and "id" in kwargs:
|
||||
self._log_flow_event(f"Loading flow state from memory for ID: {kwargs['id']}", color="bold_yellow")
|
||||
stored_state = self._persistence.load_state(kwargs["id"])
|
||||
if not stored_state:
|
||||
# For kwargs["id"], we allow creating new state if not found
|
||||
self._state = self._create_initial_state()
|
||||
if kwargs:
|
||||
self._initialize_state(kwargs)
|
||||
return
|
||||
|
||||
# Initialize state based on persistence and kwargs
|
||||
if stored_state:
|
||||
# Create initial state and restore from persistence
|
||||
self._state = self._create_initial_state()
|
||||
self._restore_state(stored_state)
|
||||
# Apply any additional kwargs to override specific fields
|
||||
if kwargs:
|
||||
filtered_kwargs = {k: v for k, v in kwargs.items() if k != "id"}
|
||||
if filtered_kwargs:
|
||||
self._initialize_state(filtered_kwargs)
|
||||
else:
|
||||
# No stored state, create new state with initial values
|
||||
self._state = self._create_initial_state()
|
||||
# Apply any additional kwargs
|
||||
if kwargs:
|
||||
self._initialize_state(kwargs)
|
||||
# Apply any additional kwargs
|
||||
if kwargs:
|
||||
self._initialize_state(kwargs)
|
||||
|
||||
self._telemetry.flow_creation_span(self.__class__.__name__)
|
||||
|
||||
@@ -635,18 +581,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
@property
|
||||
def flow_id(self) -> str:
|
||||
"""Returns the unique identifier of this flow instance.
|
||||
|
||||
|
||||
This property provides a consistent way to access the flow's unique identifier
|
||||
regardless of the underlying state implementation (dict or BaseModel).
|
||||
|
||||
|
||||
Returns:
|
||||
str: The flow's unique identifier, or an empty string if not found
|
||||
|
||||
|
||||
Note:
|
||||
This property safely handles both dictionary and BaseModel state types,
|
||||
returning an empty string if the ID cannot be retrieved rather than raising
|
||||
an exception.
|
||||
|
||||
|
||||
Example:
|
||||
```python
|
||||
flow = MyFlow()
|
||||
@@ -656,7 +602,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
try:
|
||||
if not hasattr(self, '_state'):
|
||||
return ""
|
||||
|
||||
|
||||
if isinstance(self._state, dict):
|
||||
return str(self._state.get("id", ""))
|
||||
elif isinstance(self._state, BaseModel):
|
||||
@@ -731,7 +677,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
"""
|
||||
# When restoring from persistence, use the stored ID
|
||||
stored_id = stored_state.get("id")
|
||||
self._log_flow_event(f"Restoring flow state from memory for ID: {stored_id}", color="bold_yellow")
|
||||
if not stored_id:
|
||||
raise ValueError("Stored state must have an 'id' field")
|
||||
|
||||
@@ -755,6 +700,36 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
|
||||
|
||||
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
"""Start the flow execution.
|
||||
|
||||
Args:
|
||||
inputs: Optional dictionary containing input values and potentially a state ID to restore
|
||||
"""
|
||||
# Handle state restoration if ID is provided in inputs
|
||||
if inputs and 'id' in inputs and self._persistence is not None:
|
||||
restore_uuid = inputs['id']
|
||||
stored_state = self._persistence.load_state(restore_uuid)
|
||||
|
||||
# Override the id in the state if it exists in inputs
|
||||
if 'id' in inputs:
|
||||
if isinstance(self._state, dict):
|
||||
self._state['id'] = inputs['id']
|
||||
elif isinstance(self._state, BaseModel):
|
||||
setattr(self._state, 'id', inputs['id'])
|
||||
|
||||
if stored_state:
|
||||
self._log_flow_event(f"Loading flow state from memory for UUID: {restore_uuid}", color="yellow")
|
||||
# Restore the state
|
||||
self._restore_state(stored_state)
|
||||
else:
|
||||
self._log_flow_event(f"No flow state found for UUID: {restore_uuid}", color="red")
|
||||
|
||||
# Apply any additional inputs after restoration
|
||||
filtered_inputs = {k: v for k, v in inputs.items() if k != 'id'}
|
||||
if filtered_inputs:
|
||||
self._initialize_state(filtered_inputs)
|
||||
|
||||
# Start flow execution
|
||||
self.event_emitter.send(
|
||||
self,
|
||||
event=FlowStartedEvent(
|
||||
@@ -762,10 +737,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
flow_name=self.__class__.__name__,
|
||||
),
|
||||
)
|
||||
self._log_flow_event(f"Flow started with ID: {self.flow_id}", color="yellow")
|
||||
self._log_flow_event(f"Flow started with ID: {self.flow_id}", color="bold_magenta")
|
||||
|
||||
if inputs is not None:
|
||||
if inputs is not None and 'id' not in inputs:
|
||||
self._initialize_state(inputs)
|
||||
|
||||
return asyncio.run(self.kickoff_async())
|
||||
|
||||
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
@@ -1010,18 +986,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
def _log_flow_event(self, message: str, color: str = "yellow", level: str = "info") -> None:
|
||||
"""Centralized logging method for flow events.
|
||||
|
||||
|
||||
This method provides a consistent interface for logging flow-related events,
|
||||
combining both console output with colors and proper logging levels.
|
||||
|
||||
|
||||
Args:
|
||||
message: The message to log
|
||||
color: Color to use for console output (default: yellow)
|
||||
Available colors: purple, red, bold_green, bold_purple,
|
||||
bold_blue, yellow, bold_yellow
|
||||
bold_blue, yellow, yellow
|
||||
level: Log level to use (default: info)
|
||||
Supported levels: info, warning
|
||||
|
||||
|
||||
Note:
|
||||
This method uses the Printer utility for colored console output
|
||||
and the standard logging module for log level support.
|
||||
@@ -1031,7 +1007,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
logger.info(message)
|
||||
elif level == "warning":
|
||||
logger.warning(message)
|
||||
|
||||
|
||||
def plot(self, filename: str = "crewai_flow") -> None:
|
||||
self._telemetry.flow_plotting_span(
|
||||
self.__class__.__name__, list(self._methods.keys())
|
||||
|
||||
@@ -54,57 +54,44 @@ LOG_MESSAGES = {
|
||||
|
||||
class PersistenceDecorator:
|
||||
"""Class to handle flow state persistence with consistent logging."""
|
||||
|
||||
|
||||
_printer = Printer() # Class-level printer instance
|
||||
|
||||
|
||||
@classmethod
|
||||
def persist_state(cls, flow_instance: Any, method_name: str, persistence_instance: FlowPersistence) -> None:
|
||||
"""Persist flow state with proper error handling and logging.
|
||||
|
||||
|
||||
This method handles the persistence of flow state data, including proper
|
||||
error handling and colored console output for status updates.
|
||||
|
||||
|
||||
Args:
|
||||
flow_instance: The flow instance whose state to persist
|
||||
method_name: Name of the method that triggered persistence
|
||||
persistence_instance: The persistence backend to use
|
||||
|
||||
|
||||
Raises:
|
||||
ValueError: If flow has no state or state lacks an ID
|
||||
RuntimeError: If state persistence fails
|
||||
AttributeError: If flow instance lacks required state attributes
|
||||
|
||||
Note:
|
||||
Uses bold_yellow color for success messages and red for errors.
|
||||
All operations are logged at appropriate levels (info/error).
|
||||
|
||||
Example:
|
||||
```python
|
||||
@persist
|
||||
def my_flow_method(self):
|
||||
# Method implementation
|
||||
pass
|
||||
# State will be automatically persisted after method execution
|
||||
```
|
||||
"""
|
||||
try:
|
||||
state = getattr(flow_instance, 'state', None)
|
||||
if state is None:
|
||||
raise ValueError("Flow instance has no state")
|
||||
|
||||
|
||||
flow_uuid: Optional[str] = None
|
||||
if isinstance(state, dict):
|
||||
flow_uuid = state.get('id')
|
||||
elif isinstance(state, BaseModel):
|
||||
flow_uuid = getattr(state, 'id', None)
|
||||
|
||||
|
||||
if not flow_uuid:
|
||||
raise ValueError("Flow state must have an 'id' field for persistence")
|
||||
|
||||
|
||||
# Log state saving with consistent message
|
||||
cls._printer.print(LOG_MESSAGES["save_state"].format(flow_uuid), color="bold_yellow")
|
||||
cls._printer.print(LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan")
|
||||
logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))
|
||||
|
||||
|
||||
try:
|
||||
persistence_instance.save_state(
|
||||
flow_uuid=flow_uuid,
|
||||
@@ -154,44 +141,79 @@ def persist(persistence: Optional[FlowPersistence] = None):
|
||||
def begin(self):
|
||||
pass
|
||||
"""
|
||||
|
||||
def decorator(target: Union[Type, Callable[..., T]]) -> Union[Type, Callable[..., T]]:
|
||||
"""Decorator that handles both class and method decoration."""
|
||||
actual_persistence = persistence or SQLiteFlowPersistence()
|
||||
|
||||
if isinstance(target, type):
|
||||
# Class decoration
|
||||
class_methods = {}
|
||||
for name, method in target.__dict__.items():
|
||||
if callable(method) and hasattr(method, "__is_flow_method__"):
|
||||
# Wrap each flow method with persistence
|
||||
if asyncio.iscoroutinefunction(method):
|
||||
@functools.wraps(method)
|
||||
async def class_async_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
||||
method_coro = method(self, *args, **kwargs)
|
||||
if asyncio.iscoroutine(method_coro):
|
||||
result = await method_coro
|
||||
else:
|
||||
result = method_coro
|
||||
PersistenceDecorator.persist_state(self, method.__name__, actual_persistence)
|
||||
return result
|
||||
class_methods[name] = class_async_wrapper
|
||||
else:
|
||||
@functools.wraps(method)
|
||||
def class_sync_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
||||
result = method(self, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(self, method.__name__, actual_persistence)
|
||||
return result
|
||||
class_methods[name] = class_sync_wrapper
|
||||
original_init = getattr(target, "__init__")
|
||||
|
||||
# Preserve flow-specific attributes
|
||||
@functools.wraps(original_init)
|
||||
def new_init(self: Any, *args: Any, **kwargs: Any) -> None:
|
||||
if 'persistence' not in kwargs:
|
||||
kwargs['persistence'] = actual_persistence
|
||||
original_init(self, *args, **kwargs)
|
||||
|
||||
setattr(target, "__init__", new_init)
|
||||
|
||||
# Store original methods to preserve their decorators
|
||||
original_methods = {}
|
||||
|
||||
for name, method in target.__dict__.items():
|
||||
if callable(method) and (
|
||||
hasattr(method, "__is_start_method__") or
|
||||
hasattr(method, "__trigger_methods__") or
|
||||
hasattr(method, "__condition_type__") or
|
||||
hasattr(method, "__is_flow_method__") or
|
||||
hasattr(method, "__is_router__")
|
||||
):
|
||||
original_methods[name] = method
|
||||
|
||||
# Create wrapped versions of the methods that include persistence
|
||||
for name, method in original_methods.items():
|
||||
if asyncio.iscoroutinefunction(method):
|
||||
# Create a closure to capture the current name and method
|
||||
def create_async_wrapper(method_name: str, original_method: Callable):
|
||||
@functools.wraps(original_method)
|
||||
async def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
||||
result = await original_method(self, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(self, method_name, actual_persistence)
|
||||
return result
|
||||
return method_wrapper
|
||||
|
||||
wrapped = create_async_wrapper(name, method)
|
||||
|
||||
# Preserve all original decorators and attributes
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(class_methods[name], attr, getattr(method, attr))
|
||||
setattr(class_methods[name], "__is_flow_method__", True)
|
||||
setattr(wrapped, attr, getattr(method, attr))
|
||||
setattr(wrapped, "__is_flow_method__", True)
|
||||
|
||||
# Update the class with the wrapped method
|
||||
setattr(target, name, wrapped)
|
||||
else:
|
||||
# Create a closure to capture the current name and method
|
||||
def create_sync_wrapper(method_name: str, original_method: Callable):
|
||||
@functools.wraps(original_method)
|
||||
def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
||||
result = original_method(self, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(self, method_name, actual_persistence)
|
||||
return result
|
||||
return method_wrapper
|
||||
|
||||
wrapped = create_sync_wrapper(name, method)
|
||||
|
||||
# Preserve all original decorators and attributes
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(wrapped, attr, getattr(method, attr))
|
||||
setattr(wrapped, "__is_flow_method__", True)
|
||||
|
||||
# Update the class with the wrapped method
|
||||
setattr(target, name, wrapped)
|
||||
|
||||
# Update class with wrapped methods
|
||||
for name, method in class_methods.items():
|
||||
setattr(target, name, method)
|
||||
return target
|
||||
else:
|
||||
# Method decoration
|
||||
@@ -208,6 +230,7 @@ def persist(persistence: Optional[FlowPersistence] = None):
|
||||
result = method_coro
|
||||
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence)
|
||||
return result
|
||||
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(method_async_wrapper, attr, getattr(method, attr))
|
||||
@@ -219,6 +242,7 @@ def persist(persistence: Optional[FlowPersistence] = None):
|
||||
result = method(flow_instance, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence)
|
||||
return result
|
||||
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(method_sync_wrapper, attr, getattr(method, attr))
|
||||
|
||||
@@ -3,10 +3,9 @@ SQLite-based implementation of flow state persistence.
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import sqlite3
|
||||
import tempfile
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
@@ -16,34 +15,34 @@ from crewai.flow.persistence.base import FlowPersistence
|
||||
|
||||
class SQLiteFlowPersistence(FlowPersistence):
|
||||
"""SQLite-based implementation of flow state persistence.
|
||||
|
||||
|
||||
This class provides a simple, file-based persistence implementation using SQLite.
|
||||
It's suitable for development and testing, or for production use cases with
|
||||
moderate performance requirements.
|
||||
"""
|
||||
|
||||
|
||||
db_path: str # Type annotation for instance variable
|
||||
|
||||
|
||||
def __init__(self, db_path: Optional[str] = None):
|
||||
"""Initialize SQLite persistence.
|
||||
|
||||
|
||||
Args:
|
||||
db_path: Path to the SQLite database file. If not provided, uses
|
||||
db_storage_path() from utilities.paths.
|
||||
|
||||
|
||||
Raises:
|
||||
ValueError: If db_path is invalid
|
||||
"""
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
# Get path from argument or default location
|
||||
path = db_path or db_storage_path()
|
||||
|
||||
path = db_path or str(Path(db_storage_path()) / "flow_states.db")
|
||||
|
||||
if not path:
|
||||
raise ValueError("Database path must be provided")
|
||||
|
||||
|
||||
self.db_path = path # Now mypy knows this is str
|
||||
self.init_db()
|
||||
|
||||
|
||||
def init_db(self) -> None:
|
||||
"""Create the necessary tables if they don't exist."""
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
@@ -58,10 +57,10 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
""")
|
||||
# Add index for faster UUID lookups
|
||||
conn.execute("""
|
||||
CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
|
||||
CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
|
||||
ON flow_states(flow_uuid)
|
||||
""")
|
||||
|
||||
|
||||
def save_state(
|
||||
self,
|
||||
flow_uuid: str,
|
||||
@@ -69,7 +68,7 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
state_data: Union[Dict[str, Any], BaseModel],
|
||||
) -> None:
|
||||
"""Save the current flow state to SQLite.
|
||||
|
||||
|
||||
Args:
|
||||
flow_uuid: Unique identifier for the flow instance
|
||||
method_name: Name of the method that just completed
|
||||
@@ -84,7 +83,7 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
raise ValueError(
|
||||
f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
|
||||
)
|
||||
|
||||
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute("""
|
||||
INSERT INTO flow_states (
|
||||
@@ -99,13 +98,13 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
datetime.utcnow().isoformat(),
|
||||
json.dumps(state_dict),
|
||||
))
|
||||
|
||||
|
||||
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
|
||||
"""Load the most recent state for a given flow UUID.
|
||||
|
||||
|
||||
Args:
|
||||
flow_uuid: Unique identifier for the flow instance
|
||||
|
||||
|
||||
Returns:
|
||||
The most recent state as a dictionary, or None if no state exists
|
||||
"""
|
||||
@@ -118,7 +117,7 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
LIMIT 1
|
||||
""", (flow_uuid,))
|
||||
row = cursor.fetchone()
|
||||
|
||||
|
||||
if row:
|
||||
return json.loads(row[0])
|
||||
return None
|
||||
|
||||
@@ -142,7 +142,6 @@ class LLM:
|
||||
self.temperature = temperature
|
||||
self.top_p = top_p
|
||||
self.n = n
|
||||
self.stop = stop
|
||||
self.max_completion_tokens = max_completion_tokens
|
||||
self.max_tokens = max_tokens
|
||||
self.presence_penalty = presence_penalty
|
||||
@@ -160,37 +159,63 @@ class LLM:
|
||||
|
||||
litellm.drop_params = True
|
||||
|
||||
# Normalize self.stop to always be a List[str]
|
||||
if stop is None:
|
||||
self.stop: List[str] = []
|
||||
elif isinstance(stop, str):
|
||||
self.stop = [stop]
|
||||
else:
|
||||
self.stop = stop
|
||||
|
||||
self.set_callbacks(callbacks)
|
||||
self.set_env_callbacks()
|
||||
|
||||
def call(
|
||||
self,
|
||||
messages: List[Dict[str, str]],
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
tools: Optional[List[dict]] = None,
|
||||
callbacks: Optional[List[Any]] = None,
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
High-level call method that:
|
||||
1) Calls litellm.completion
|
||||
2) Checks for function/tool calls
|
||||
3) If a tool call is found:
|
||||
a) executes the function
|
||||
b) returns the result
|
||||
4) If no tool call, returns the text response
|
||||
High-level llm call method that:
|
||||
1) Accepts either a string or a list of messages
|
||||
2) Converts string input to the required message format
|
||||
3) Calls litellm.completion
|
||||
4) Handles function/tool calls if any
|
||||
5) Returns the final text response or tool result
|
||||
|
||||
:param messages: The conversation messages
|
||||
:param tools: Optional list of function schemas for function calling
|
||||
:param callbacks: Optional list of callbacks
|
||||
:param available_functions: A dictionary mapping function_name -> actual Python function
|
||||
:return: Final text response from the LLM or the tool result
|
||||
Parameters:
|
||||
- messages (Union[str, List[Dict[str, str]]]): The input messages for the LLM.
|
||||
- If a string is provided, it will be converted into a message list with a single entry.
|
||||
- If a list of dictionaries is provided, each dictionary should have 'role' and 'content' keys.
|
||||
- tools (Optional[List[dict]]): A list of tool schemas for function calling.
|
||||
- callbacks (Optional[List[Any]]): A list of callback functions to be executed.
|
||||
- available_functions (Optional[Dict[str, Any]]): A dictionary mapping function names to actual Python functions.
|
||||
|
||||
Returns:
|
||||
- str: The final text response from the LLM or the result of a tool function call.
|
||||
|
||||
Examples:
|
||||
---------
|
||||
# Example 1: Using a string input
|
||||
response = llm.call("Return the name of a random city in the world.")
|
||||
print(response)
|
||||
|
||||
# Example 2: Using a list of messages
|
||||
messages = [{"role": "user", "content": "What is the capital of France?"}]
|
||||
response = llm.call(messages)
|
||||
print(response)
|
||||
"""
|
||||
if isinstance(messages, str):
|
||||
messages = [{"role": "user", "content": messages}]
|
||||
|
||||
with suppress_warnings():
|
||||
if callbacks and len(callbacks) > 0:
|
||||
self.set_callbacks(callbacks)
|
||||
|
||||
try:
|
||||
# --- 1) Make the completion call
|
||||
# --- 1) Prepare the parameters for the completion call
|
||||
params = {
|
||||
"model": self.model,
|
||||
"messages": messages,
|
||||
@@ -211,19 +236,21 @@ class LLM:
|
||||
"api_version": self.api_version,
|
||||
"api_key": self.api_key,
|
||||
"stream": False,
|
||||
"tools": tools, # pass the tool schema
|
||||
"tools": tools,
|
||||
}
|
||||
|
||||
# Remove None values from params
|
||||
params = {k: v for k, v in params.items() if v is not None}
|
||||
|
||||
# --- 2) Make the completion call
|
||||
response = litellm.completion(**params)
|
||||
response_message = cast(Choices, cast(ModelResponse, response).choices)[
|
||||
0
|
||||
].message
|
||||
text_response = response_message.content or ""
|
||||
tool_calls = getattr(response_message, "tool_calls", [])
|
||||
|
||||
# Ensure callbacks get the full response object with usage info
|
||||
|
||||
# --- 3) Handle callbacks with usage info
|
||||
if callbacks and len(callbacks) > 0:
|
||||
for callback in callbacks:
|
||||
if hasattr(callback, "log_success_event"):
|
||||
@@ -236,11 +263,11 @@ class LLM:
|
||||
end_time=0,
|
||||
)
|
||||
|
||||
# --- 2) If no tool calls, return the text response
|
||||
# --- 4) If no tool calls, return the text response
|
||||
if not tool_calls or not available_functions:
|
||||
return text_response
|
||||
|
||||
# --- 3) Handle the tool call
|
||||
# --- 5) Handle the tool call
|
||||
tool_call = tool_calls[0]
|
||||
function_name = tool_call.function.name
|
||||
|
||||
@@ -255,7 +282,6 @@ class LLM:
|
||||
try:
|
||||
# Call the actual tool function
|
||||
result = fn(**function_args)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
|
||||
@@ -23,7 +23,7 @@ class KickoffTaskOutputsSQLiteStorage:
|
||||
) -> None:
|
||||
if db_path is None:
|
||||
# Get the parent directory of the default db path and create our db file there
|
||||
db_path = str(Path(db_storage_path()).parent / "latest_kickoff_task_outputs.db")
|
||||
db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
|
||||
self.db_path = db_path
|
||||
self._printer: Printer = Printer()
|
||||
self._initialize_db()
|
||||
|
||||
@@ -17,7 +17,7 @@ class LTMSQLiteStorage:
|
||||
) -> None:
|
||||
if db_path is None:
|
||||
# Get the parent directory of the default db path and create our db file there
|
||||
db_path = str(Path(db_storage_path()).parent / "long_term_memory_storage.db")
|
||||
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
|
||||
self.db_path = db_path
|
||||
self._printer: Printer = Printer()
|
||||
# Ensure parent directory exists
|
||||
|
||||
@@ -24,12 +24,10 @@ def create_llm(
|
||||
|
||||
# 1) If llm_value is already an LLM object, return it directly
|
||||
if isinstance(llm_value, LLM):
|
||||
print("LLM value is already an LLM object")
|
||||
return llm_value
|
||||
|
||||
# 2) If llm_value is a string (model name)
|
||||
if isinstance(llm_value, str):
|
||||
print("LLM value is a string")
|
||||
try:
|
||||
created_llm = LLM(model=llm_value)
|
||||
return created_llm
|
||||
@@ -39,12 +37,10 @@ def create_llm(
|
||||
|
||||
# 3) If llm_value is None, parse environment variables or use default
|
||||
if llm_value is None:
|
||||
print("LLM value is None")
|
||||
return _llm_via_environment_or_fallback()
|
||||
|
||||
# 4) Otherwise, attempt to extract relevant attributes from an unknown object
|
||||
try:
|
||||
print("LLM value is an unknown object")
|
||||
# Extract attributes with explicit types
|
||||
model = (
|
||||
getattr(llm_value, "model_name", None)
|
||||
|
||||
@@ -7,7 +7,7 @@ import appdirs
|
||||
|
||||
def db_storage_path() -> str:
|
||||
"""Returns the path for SQLite database storage.
|
||||
|
||||
|
||||
Returns:
|
||||
str: Full path to the SQLite database file
|
||||
"""
|
||||
@@ -16,7 +16,7 @@ def db_storage_path() -> str:
|
||||
|
||||
data_dir = Path(appdirs.user_data_dir(app_name, app_author))
|
||||
data_dir.mkdir(parents=True, exist_ok=True)
|
||||
return str(data_dir / "crewai_flows.db")
|
||||
return str(data_dir)
|
||||
|
||||
|
||||
def get_project_directory_name():
|
||||
@@ -28,4 +28,4 @@ def get_project_directory_name():
|
||||
else:
|
||||
cwd = Path.cwd()
|
||||
project_directory_name = cwd.name
|
||||
return project_directory_name
|
||||
return project_directory_name
|
||||
@@ -21,6 +21,16 @@ class Printer:
|
||||
self._print_yellow(content)
|
||||
elif color == "bold_yellow":
|
||||
self._print_bold_yellow(content)
|
||||
elif color == "cyan":
|
||||
self._print_cyan(content)
|
||||
elif color == "bold_cyan":
|
||||
self._print_bold_cyan(content)
|
||||
elif color == "magenta":
|
||||
self._print_magenta(content)
|
||||
elif color == "bold_magenta":
|
||||
self._print_bold_magenta(content)
|
||||
elif color == "green":
|
||||
self._print_green(content)
|
||||
else:
|
||||
print(content)
|
||||
|
||||
@@ -44,3 +54,18 @@ class Printer:
|
||||
|
||||
def _print_bold_yellow(self, content):
|
||||
print("\033[1m\033[93m {}\033[00m".format(content))
|
||||
|
||||
def _print_cyan(self, content):
|
||||
print("\033[96m {}\033[00m".format(content))
|
||||
|
||||
def _print_bold_cyan(self, content):
|
||||
print("\033[1m\033[96m {}\033[00m".format(content))
|
||||
|
||||
def _print_magenta(self, content):
|
||||
print("\033[35m {}\033[00m".format(content))
|
||||
|
||||
def _print_bold_magenta(self, content):
|
||||
print("\033[1m\033[35m {}\033[00m".format(content))
|
||||
|
||||
def _print_green(self, content):
|
||||
print("\033[32m {}\033[00m".format(content))
|
||||
|
||||
@@ -16,7 +16,7 @@ from crewai.tools import tool
|
||||
from crewai.tools.tool_calling import InstructorToolCalling
|
||||
from crewai.tools.tool_usage import ToolUsage
|
||||
from crewai.tools.tool_usage_events import ToolUsageFinished
|
||||
from crewai.utilities import RPMController
|
||||
from crewai.utilities import Printer, RPMController
|
||||
from crewai.utilities.events import Emitter
|
||||
|
||||
|
||||
@@ -1622,3 +1622,103 @@ def test_agent_with_knowledge_sources_works_with_test():
|
||||
assert agent_copy.backstory == agent.backstory
|
||||
assert agent_copy.knowledge_sources == agent.knowledge_sources
|
||||
assert isinstance(agent_copy.llm, LLM)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_litellm_auth_error_handling():
|
||||
"""Test that LiteLLM authentication errors are handled correctly and not retried."""
|
||||
from litellm import AuthenticationError as LiteLLMAuthenticationError
|
||||
|
||||
# Create an agent with a mocked LLM and max_retry_limit=0
|
||||
agent = Agent(
|
||||
role="test role",
|
||||
goal="test goal",
|
||||
backstory="test backstory",
|
||||
llm=LLM(model="gpt-4"),
|
||||
max_retry_limit=0, # Disable retries for authentication errors
|
||||
)
|
||||
|
||||
# Create a task
|
||||
task = Task(
|
||||
description="Test task",
|
||||
expected_output="Test output",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
# Mock the LLM call to raise LiteLLMAuthenticationError
|
||||
with (
|
||||
patch.object(LLM, "call") as mock_llm_call,
|
||||
pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
|
||||
):
|
||||
mock_llm_call.side_effect = LiteLLMAuthenticationError(
|
||||
message="Invalid API key", llm_provider="openai", model="gpt-4"
|
||||
)
|
||||
agent.execute_task(task)
|
||||
|
||||
# Verify the call was only made once (no retries)
|
||||
mock_llm_call.assert_called_once()
|
||||
|
||||
|
||||
def test_crew_agent_executor_litellm_auth_error():
|
||||
"""Test that CrewAgentExecutor handles LiteLLM authentication errors by raising them."""
|
||||
from litellm import AuthenticationError as LiteLLMAuthenticationError
|
||||
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.utilities import Printer
|
||||
|
||||
# Create an agent and executor
|
||||
agent = Agent(
|
||||
role="test role",
|
||||
goal="test goal",
|
||||
backstory="test backstory",
|
||||
llm=LLM(model="gpt-4", api_key="invalid_api_key"),
|
||||
)
|
||||
task = Task(
|
||||
description="Test task",
|
||||
expected_output="Test output",
|
||||
agent=agent,
|
||||
)
|
||||
|
||||
# Create executor with all required parameters
|
||||
executor = CrewAgentExecutor(
|
||||
agent=agent,
|
||||
task=task,
|
||||
llm=agent.llm,
|
||||
crew=None,
|
||||
prompt={"system": "You are a test agent", "user": "Execute the task: {input}"},
|
||||
max_iter=5,
|
||||
tools=[],
|
||||
tools_names="",
|
||||
stop_words=[],
|
||||
tools_description="",
|
||||
tools_handler=ToolsHandler(),
|
||||
)
|
||||
|
||||
# Mock the LLM call to raise LiteLLMAuthenticationError
|
||||
with (
|
||||
patch.object(LLM, "call") as mock_llm_call,
|
||||
patch.object(Printer, "print") as mock_printer,
|
||||
pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
|
||||
):
|
||||
mock_llm_call.side_effect = LiteLLMAuthenticationError(
|
||||
message="Invalid API key", llm_provider="openai", model="gpt-4"
|
||||
)
|
||||
executor.invoke(
|
||||
{
|
||||
"input": "test input",
|
||||
"tool_names": "",
|
||||
"tools": "",
|
||||
}
|
||||
)
|
||||
|
||||
# Verify error handling
|
||||
mock_printer.assert_any_call(
|
||||
content="An unknown error occurred. Please check the details below.",
|
||||
color="red",
|
||||
)
|
||||
mock_printer.assert_any_call(
|
||||
content="Error details: litellm.AuthenticationError: Invalid API key",
|
||||
color="red",
|
||||
)
|
||||
# Verify the call was only made once (no retries)
|
||||
mock_llm_call.assert_called_once()
|
||||
|
||||
@@ -2,21 +2,21 @@ interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n"}, {"role": "user", "content":
|
||||
"\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect criteria
|
||||
for your final answer: The final answer\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}], "model": "gpt-4o"}'
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
|
||||
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
|
||||
just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
|
||||
in your response:\n\n```\nThought: you should always think about what to do\nAction:
|
||||
the action to take, only one name of [get_final_answer], just the name, exactly
|
||||
as it''s written.\nAction Input: the input to the action, just a simple JSON
|
||||
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect
|
||||
criteria for your final answer: The final answer\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -25,16 +25,13 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1325'
|
||||
- '1367'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
__cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -44,30 +41,35 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-ABAtOWmVjvzQ9X58tKAUcOF4gmXwx\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226842,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-AsXdf4OZKCZSigmN4k0gyh67NciqP\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737562383,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I need to use the get_final_answer
|
||||
tool to determine the final answer.\\nAction: get_final_answer\\nAction Input:
|
||||
{}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 274,\n \"completion_tokens\":
|
||||
27,\n \"total_tokens\": 301,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I have to use the available
|
||||
tool to get the final answer. Let's proceed with executing it.\\nAction: get_final_answer\\nAction
|
||||
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
274,\n \"completion_tokens\": 33,\n \"total_tokens\": 307,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_50cad350e4\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c8727b3492f31e6-MIA
|
||||
- 9060d43e3be1d690-IAD
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -75,19 +77,27 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 25 Sep 2024 01:14:03 GMT
|
||||
- Wed, 22 Jan 2025 16:13:03 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=_Jcp7wnO_mXdvOnborCN6j8HwJxJXbszedJC1l7pFUg-1737562383-1.0.1.1-pDSLXlg.nKjG4wsT7mTJPjUvOX1UJITiS4MqKp6yfMWwRSJINsW1qC48SAcjBjakx2H5I1ESVk9JtUpUFDtf4g;
|
||||
path=/; expires=Wed, 22-Jan-25 16:43:03 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=x3SYvzL2nq_PTBGtE8R9cl5CkeaaDzZFQIrYfo91S2s-1737562383916-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '348'
|
||||
- '791'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -99,45 +109,59 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999682'
|
||||
- '29999680'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_be929caac49706f487950548bdcdd46e
|
||||
- req_eeed99acafd3aeb1e3d4a6c8063192b0
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n"}, {"role": "user", "content":
|
||||
"\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect criteria
|
||||
for your final answer: The final answer\nyou MUST return the actual complete
|
||||
content as the final answer, not a summary.\n\nBegin! This is VERY important
|
||||
to you, use the tools available and give your best Final Answer, your job depends
|
||||
on it!\n\nThought:"}, {"role": "user", "content": "Thought: I need to use the
|
||||
get_final_answer tool to determine the final answer.\nAction: get_final_answer\nAction
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
|
||||
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
|
||||
just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
|
||||
in your response:\n\n```\nThought: you should always think about what to do\nAction:
|
||||
the action to take, only one name of [get_final_answer], just the name, exactly
|
||||
as it''s written.\nAction Input: the input to the action, just a simple JSON
|
||||
object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"}, {"role": "user",
|
||||
"content": "\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect
|
||||
criteria for your final answer: The final answer\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "```\nThought:
|
||||
I have to use the available tool to get the final answer. Let''s proceed with
|
||||
executing it.\nAction: get_final_answer\nAction Input: {}\nObservation: I encountered
|
||||
an error: Error on parsing tool.\nMoving on then. I MUST either use a tool (use
|
||||
one at time) OR give my best final answer not both at the same time. When responding,
|
||||
I must use the following format:\n\n```\nThought: you should always think about
|
||||
what to do\nAction: the action to take, should be one of [get_final_answer]\nAction
|
||||
Input: the input to the action, dictionary enclosed in curly braces\nObservation:
|
||||
the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat
|
||||
N times. Once I know the final answer, I must return the following format:\n\n```\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described\n\n```"}, {"role":
|
||||
"assistant", "content": "```\nThought: I have to use the available tool to get
|
||||
the final answer. Let''s proceed with executing it.\nAction: get_final_answer\nAction
|
||||
Input: {}\nObservation: I encountered an error: Error on parsing tool.\nMoving
|
||||
on then. I MUST either use a tool (use one at time) OR give my best final answer
|
||||
not both at the same time. To Use the following format:\n\nThought: you should
|
||||
always think about what to do\nAction: the action to take, should be one of
|
||||
[get_final_answer]\nAction Input: the input to the action, dictionary enclosed
|
||||
in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action
|
||||
Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal
|
||||
not both at the same time. When responding, I must use the following format:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, should
|
||||
be one of [get_final_answer]\nAction Input: the input to the action, dictionary
|
||||
enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action
|
||||
Input/Result can repeat N times. Once I know the final answer, I must return
|
||||
the following format:\n\n```\nThought: I now can give a great answer\nFinal
|
||||
Answer: Your final answer must be the great and the most complete as possible,
|
||||
it must be outcome described\n\n \nNow it''s time you MUST give your absolute
|
||||
it must be outcome described\n\n```\nNow it''s time you MUST give your absolute
|
||||
best final answer. You''ll ignore all previous instructions, stop using any
|
||||
tools, and just return your absolute BEST Final answer."}], "model": "gpt-4o"}'
|
||||
tools, and just return your absolute BEST Final answer."}], "model": "gpt-4o",
|
||||
"stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -146,16 +170,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2320'
|
||||
- '3445'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
|
||||
__cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
|
||||
- __cf_bm=_Jcp7wnO_mXdvOnborCN6j8HwJxJXbszedJC1l7pFUg-1737562383-1.0.1.1-pDSLXlg.nKjG4wsT7mTJPjUvOX1UJITiS4MqKp6yfMWwRSJINsW1qC48SAcjBjakx2H5I1ESVk9JtUpUFDtf4g;
|
||||
_cfuvid=x3SYvzL2nq_PTBGtE8R9cl5CkeaaDzZFQIrYfo91S2s-1737562383916-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -165,29 +189,36 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-ABAtPaaeRfdNsZ3k06CfAmrEW8IJu\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727226843,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-AsXdg9UrLvAiqWP979E6DszLsQ84k\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737562384,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Final Answer: The final answer\",\n \"refusal\":
|
||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 483,\n \"completion_tokens\":
|
||||
6,\n \"total_tokens\": 489,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
\"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
|
||||
Answer: The final answer must be the great and the most complete as possible,
|
||||
it must be outcome described.\\n```\",\n \"refusal\": null\n },\n
|
||||
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
|
||||
\ \"usage\": {\n \"prompt_tokens\": 719,\n \"completion_tokens\": 35,\n
|
||||
\ \"total_tokens\": 754,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_50cad350e4\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c8727b9da1f31e6-MIA
|
||||
- 9060d4441edad690-IAD
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -195,7 +226,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 25 Sep 2024 01:14:03 GMT
|
||||
- Wed, 22 Jan 2025 16:13:05 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -209,7 +240,7 @@ interactions:
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '188'
|
||||
- '928'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -221,13 +252,13 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999445'
|
||||
- '29999187'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 1ms
|
||||
x-request-id:
|
||||
- req_d8e32538689fe064627468bad802d9a8
|
||||
- req_61fc7506e6db326ec572224aec81ef23
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
|
||||
tests/cassettes/test_llm_call_with_message_list.yaml (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "What is the capital of France?"}],
|
||||
"model": "gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '101'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=8NrWEBP3dDmc8p2.csR.EdsSwS8zFvzWI1kPICaK_fM-1737568015338-0.0.1.1-604800000;
|
||||
__cf_bm=pKr3NwXmTZN9rMSlKvEX40VPKbrxF93QwDNHunL2v8Y-1737568015-1.0.1.1-nR0EA7hYIwWpIBYUI53d9xQrUnl5iML6lgz4AGJW4ZGPBDxFma3PZ2cBhlr_hE7wKa5fV3r32eMu_rNWMXD.eA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AsZ6WjNfEOrHwwEEdSZZCRBiTpBMS\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737568016,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"The capital of France is Paris.\",\n
|
||||
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 14,\n \"completion_tokens\":
|
||||
8,\n \"total_tokens\": 22,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 90615dc63b805cb1-RDU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 22 Jan 2025 17:46:56 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '355'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999974'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_cdbed69c9c63658eb552b07f1220df19
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
tests/cassettes/test_llm_call_with_string_input.yaml (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Return the name of a random
|
||||
city in the world."}], "model": "gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '117'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=3UeEmz_rnmsoZxrVUv32u35gJOi766GDWNe5_RTjiPk-1736537376739-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AsZ6UtbaNSMpNU9VJKxvn52t5eJTq\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737568014,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"How about \\\"Lisbon\\\"? It\u2019s the
|
||||
capital city of Portugal, known for its rich history and vibrant culture.\",\n
|
||||
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 18,\n \"completion_tokens\":
|
||||
24,\n \"total_tokens\": 42,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 90615dbcaefb5cb1-RDU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 22 Jan 2025 17:46:55 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=pKr3NwXmTZN9rMSlKvEX40VPKbrxF93QwDNHunL2v8Y-1737568015-1.0.1.1-nR0EA7hYIwWpIBYUI53d9xQrUnl5iML6lgz4AGJW4ZGPBDxFma3PZ2cBhlr_hE7wKa5fV3r32eMu_rNWMXD.eA;
|
||||
path=/; expires=Wed, 22-Jan-25 18:16:55 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=8NrWEBP3dDmc8p2.csR.EdsSwS8zFvzWI1kPICaK_fM-1737568015338-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '449'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999971'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_898373758d2eae3cd84814050b2588e3
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
@@ -0,0 +1,102 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "Tell me a joke."}], "model":
|
||||
"gpt-4o-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '86'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=8NrWEBP3dDmc8p2.csR.EdsSwS8zFvzWI1kPICaK_fM-1737568015338-0.0.1.1-604800000;
|
||||
__cf_bm=pKr3NwXmTZN9rMSlKvEX40VPKbrxF93QwDNHunL2v8Y-1737568015-1.0.1.1-nR0EA7hYIwWpIBYUI53d9xQrUnl5iML6lgz4AGJW4ZGPBDxFma3PZ2cBhlr_hE7wKa5fV3r32eMu_rNWMXD.eA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AsZ6VyjuUcXYpChXmD8rUSy6nSGq8\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737568015,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Why did the scarecrow win an award? \\n\\nBecause
|
||||
he was outstanding in his field!\",\n \"refusal\": null\n },\n \"logprobs\":
|
||||
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
12,\n \"completion_tokens\": 19,\n \"total_tokens\": 31,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 90615dc03b6c5cb1-RDU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 22 Jan 2025 17:46:56 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '825'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999979'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_4c1485d44e7461396d4a7316a63ff353
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
tests/cassettes/test_llm_call_with_tool_and_message_list.yaml (new file, 111 lines)
@@ -0,0 +1,111 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "What is the square of 5?"}],
|
||||
"model": "gpt-4o-mini", "tools": [{"type": "function", "function": {"name":
|
||||
"square_number", "description": "Returns the square of a number.", "parameters":
|
||||
{"type": "object", "properties": {"number": {"type": "integer", "description":
|
||||
"The number to square"}}, "required": ["number"]}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '361'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AsZL5nGOaVpcGnDOesTxBZPHhMoaS\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737568919,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_i6JVJ1KxX79A4WzFri98E03U\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"square_number\",\n
|
||||
\ \"arguments\": \"{\\\"number\\\":5}\"\n }\n }\n
|
||||
\ ],\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
58,\n \"completion_tokens\": 15,\n \"total_tokens\": 73,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 906173d229b905f6-IAD
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 22 Jan 2025 18:02:00 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=BYDpIoqfPZyRxl9xcFxkt4IzTUGe8irWQlZ.aYLt8Xc-1737568920-1.0.1.1-Y_cVFN7TbguWRBorSKZynVY02QUtYbsbHuR2gR1wJ8LHuqOF4xIxtK5iHVCpWWgIyPDol9xOXiqUkU8xRV_vHA;
|
||||
path=/; expires=Wed, 22-Jan-25 18:32:00 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=etTqqA9SBOnENmrFAUBIexdW0v2ZeO1x9_Ek_WChlfU-1737568920137-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '642'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999976'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_388e63f9b8d4edc0dd153001f25388e5
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
tests/cassettes/test_llm_call_with_tool_and_string_input.yaml (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "user", "content": "What is the current year?"}],
|
||||
"model": "gpt-4o-mini", "tools": [{"type": "function", "function": {"name":
|
||||
"get_current_year", "description": "Returns the current year as a string.",
|
||||
"parameters": {"type": "object", "properties": {}, "required": []}}}]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '295'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- _cfuvid=8NrWEBP3dDmc8p2.csR.EdsSwS8zFvzWI1kPICaK_fM-1737568015338-0.0.1.1-604800000;
|
||||
__cf_bm=pKr3NwXmTZN9rMSlKvEX40VPKbrxF93QwDNHunL2v8Y-1737568015-1.0.1.1-nR0EA7hYIwWpIBYUI53d9xQrUnl5iML6lgz4AGJW4ZGPBDxFma3PZ2cBhlr_hE7wKa5fV3r32eMu_rNWMXD.eA
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.59.6
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.59.6
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AsZJ8HKXQU9nTB7xbGAkKxqrg9BZ2\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1737568798,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
|
||||
\ \"id\": \"call_mfvEs2jngeFloVZpZOHZVaKY\",\n \"type\":
|
||||
\"function\",\n \"function\": {\n \"name\": \"get_current_year\",\n
|
||||
\ \"arguments\": \"{}\"\n }\n }\n ],\n
|
||||
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
|
||||
\"tool_calls\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 46,\n \"completion_tokens\":
|
||||
12,\n \"total_tokens\": 58,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_72ed7ab54c\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 906170e038281775-IAD
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 22 Jan 2025 17:59:59 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '416'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999975'
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_4039a5e5772d1790a3131f0b1ea06139
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
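The two tool cassettes above record the OpenAI function-calling round trip: the request carries a `tools` array, and the response comes back with `finish_reason: "tool_calls"` plus a function name and JSON-encoded arguments instead of text content. Below is a hedged sketch of the dispatch step those recordings imply; it is not CrewAI's internal implementation, just the generic pattern of mapping a returned tool call onto the `available_functions` dictionary that the tests further below pass to `LLM.call`.

```python
import json
from datetime import datetime


def get_current_year() -> str:
    """Returns the current year as a string (same helper the test below defines)."""
    return str(datetime.now().year)


# Hypothetical tool-call fragment, shaped like the assistant message recorded above.
tool_call = {"function": {"name": "get_current_year", "arguments": "{}"}}

# name -> callable mapping, matching the `available_functions` argument in the tests.
available_functions = {"get_current_year": get_current_year}

# Generic dispatch: look up the function, decode the JSON-string arguments, invoke it.
fn = available_functions[tool_call["function"]["name"]]
result = fn(**json.loads(tool_call["function"]["arguments"]))
# The tool's raw return value is handed back to the caller, which is why the
# square_number test further below can assert an int result of 25.
```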
|
||||
@@ -4,6 +4,7 @@ import pytest
|
||||
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
from crewai.llm import LLM
|
||||
from crewai.tools import tool
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
|
||||
|
||||
@@ -37,3 +38,119 @@ def test_llm_callback_replacement():
|
||||
assert usage_metrics_1.successful_requests == 1
|
||||
assert usage_metrics_2.successful_requests == 1
|
||||
assert usage_metrics_1 == calc_handler_1.token_cost_process.get_summary()
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_llm_call_with_string_input():
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
|
||||
# Test the call method with a string input
|
||||
result = llm.call("Return the name of a random city in the world.")
|
||||
assert isinstance(result, str)
|
||||
assert len(result.strip()) > 0 # Ensure the response is not empty
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_llm_call_with_string_input_and_callbacks():
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
calc_handler = TokenCalcHandler(token_cost_process=TokenProcess())
|
||||
|
||||
# Test the call method with a string input and callbacks
|
||||
result = llm.call(
|
||||
"Tell me a joke.",
|
||||
callbacks=[calc_handler],
|
||||
)
|
||||
usage_metrics = calc_handler.token_cost_process.get_summary()
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert len(result.strip()) > 0
|
||||
assert usage_metrics.successful_requests == 1
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_llm_call_with_message_list():
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
messages = [{"role": "user", "content": "What is the capital of France?"}]
|
||||
|
||||
# Test the call method with a list of messages
|
||||
result = llm.call(messages)
|
||||
assert isinstance(result, str)
|
||||
assert "Paris" in result
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_llm_call_with_tool_and_string_input():
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
|
||||
def get_current_year() -> str:
|
||||
"""Returns the current year as a string."""
|
||||
from datetime import datetime
|
||||
|
||||
return str(datetime.now().year)
|
||||
|
||||
# Create tool schema
|
||||
tool_schema = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "get_current_year",
|
||||
"description": "Returns the current year as a string.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {},
|
||||
"required": [],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
# Available functions mapping
|
||||
available_functions = {"get_current_year": get_current_year}
|
||||
|
||||
# Test the call method with a string input and tool
|
||||
result = llm.call(
|
||||
"What is the current year?",
|
||||
tools=[tool_schema],
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert isinstance(result, str)
|
||||
assert result == get_current_year()
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_llm_call_with_tool_and_message_list():
|
||||
llm = LLM(model="gpt-4o-mini")
|
||||
|
||||
def square_number(number: int) -> int:
|
||||
"""Returns the square of a number."""
|
||||
return number * number
|
||||
|
||||
# Create tool schema
|
||||
tool_schema = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "square_number",
|
||||
"description": "Returns the square of a number.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"number": {"type": "integer", "description": "The number to square"}
|
||||
},
|
||||
"required": ["number"],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
# Available functions mapping
|
||||
available_functions = {"square_number": square_number}
|
||||
|
||||
messages = [{"role": "user", "content": "What is the square of 5?"}]
|
||||
|
||||
# Test the call method with messages and tool
|
||||
result = llm.call(
|
||||
messages,
|
||||
tools=[tool_schema],
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
assert isinstance(result, int)
|
||||
assert result == 25
|
||||
|
||||
tests/test_flow_default_override.py (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
"""Test that persisted state properly overrides default values."""
|
||||
|
||||
from crewai.flow.flow import Flow, FlowState, listen, start
|
||||
from crewai.flow.persistence import persist
|
||||
|
||||
|
||||
class PoemState(FlowState):
|
||||
"""Test state model with default values that should be overridden."""
|
||||
sentence_count: int = 1000 # Default that should be overridden
|
||||
has_set_count: bool = False # Track whether we've set the count
|
||||
poem_type: str = ""
|
||||
|
||||
|
||||
def test_default_value_override():
|
||||
"""Test that persisted state values override class defaults."""
|
||||
|
||||
@persist()
|
||||
class PoemFlow(Flow[PoemState]):
|
||||
initial_state = PoemState
|
||||
|
||||
@start()
|
||||
def set_sentence_count(self):
|
||||
if self.state.has_set_count and self.state.sentence_count == 2:
|
||||
self.state.sentence_count = 3
|
||||
|
||||
elif self.state.has_set_count and self.state.sentence_count == 1000:
|
||||
self.state.sentence_count = 1000
|
||||
|
||||
elif self.state.has_set_count and self.state.sentence_count == 5:
|
||||
self.state.sentence_count = 5
|
||||
|
||||
else:
|
||||
self.state.sentence_count = 2
|
||||
self.state.has_set_count = True
|
||||
|
||||
# First run - should set sentence_count to 2
|
||||
flow1 = PoemFlow()
|
||||
flow1.kickoff()
|
||||
original_uuid = flow1.state.id
|
||||
assert flow1.state.sentence_count == 2
|
||||
|
||||
# Second run - loads the persisted sentence_count=2 (not the default 1000), then bumps it to 3
flow2 = PoemFlow()
flow2.kickoff(inputs={"id": original_uuid})
assert flow2.state.sentence_count == 3  # Restored 2 from persistence, incremented to 3 by set_sentence_count
|
||||
|
||||
# Third run - an explicit override via inputs should take precedence over the persisted value
flow3 = PoemFlow()
|
||||
flow3.kickoff(inputs={
|
||||
"id": original_uuid,
|
||||
"has_set_count": True,
|
||||
"sentence_count": 5, # Override persisted value
|
||||
})
|
||||
assert flow3.state.sentence_count == 5 # Should use override value
|
||||
|
||||
# Fourth run - no id is provided, so nothing is restored and the class default applies
flow4 = PoemFlow()
flow4.kickoff(inputs={"has_set_count": True})
assert flow4.state.sentence_count == 1000  # Keeps the default 1000, not the persisted 2
|
||||
|
||||
|
||||
def test_multi_step_default_override():
|
||||
"""Test default value override with multiple start methods."""
|
||||
|
||||
@persist()
|
||||
class MultiStepPoemFlow(Flow[PoemState]):
|
||||
initial_state = PoemState
|
||||
|
||||
@start()
|
||||
def set_sentence_count(self):
|
||||
print("Setting sentence count")
|
||||
if not self.state.has_set_count:
|
||||
self.state.sentence_count = 3
|
||||
self.state.has_set_count = True
|
||||
|
||||
@listen(set_sentence_count)
|
||||
def set_poem_type(self):
|
||||
print("Setting poem type")
|
||||
if self.state.sentence_count == 3:
|
||||
self.state.poem_type = "haiku"
|
||||
elif self.state.sentence_count == 5:
|
||||
self.state.poem_type = "limerick"
|
||||
else:
|
||||
self.state.poem_type = "free_verse"
|
||||
|
||||
@listen(set_poem_type)
|
||||
def finished(self):
|
||||
print("finished")
|
||||
|
||||
# First run - should set both sentence count and poem type
|
||||
flow1 = MultiStepPoemFlow()
|
||||
flow1.kickoff()
|
||||
original_uuid = flow1.state.id
|
||||
assert flow1.state.sentence_count == 3
|
||||
assert flow1.state.poem_type == "haiku"
|
||||
|
||||
# Second run - should load persisted state and update poem type
|
||||
flow2 = MultiStepPoemFlow()
|
||||
flow2.kickoff(inputs={
|
||||
"id": original_uuid,
|
||||
"sentence_count": 5
|
||||
})
|
||||
assert flow2.state.sentence_count == 5
|
||||
assert flow2.state.poem_type == "limerick"
|
||||
|
||||
# Third run - restoring by id reloads the state persisted at the end of the second run
flow3 = MultiStepPoemFlow()
|
||||
flow3.kickoff(inputs={
|
||||
"id": original_uuid
|
||||
})
|
||||
assert flow3.state.sentence_count == 5
|
||||
assert flow3.state.poem_type == "limerick"
|
||||
@@ -1,12 +1,12 @@
|
||||
"""Test flow state persistence functionality."""
|
||||
|
||||
import os
|
||||
from typing import Dict, Optional
|
||||
from typing import Dict
|
||||
|
||||
import pytest
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow.flow import Flow, FlowState, start
|
||||
from crewai.flow.flow import Flow, FlowState, listen, start
|
||||
from crewai.flow.persistence import persist
|
||||
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
|
||||
|
||||
@@ -73,13 +73,14 @@ def test_flow_state_restoration(tmp_path):
|
||||
|
||||
# First flow execution to create initial state
|
||||
class RestorableFlow(Flow[TestState]):
|
||||
initial_state = TestState
|
||||
|
||||
@start()
|
||||
@persist(persistence)
|
||||
def set_message(self):
|
||||
self.state.message = "Original message"
|
||||
self.state.counter = 42
|
||||
if self.state.message == "":
|
||||
self.state.message = "Original message"
|
||||
if self.state.counter == 0:
|
||||
self.state.counter = 42
|
||||
|
||||
# Create and persist initial state
|
||||
flow1 = RestorableFlow(persistence=persistence)
|
||||
@@ -87,11 +88,11 @@ def test_flow_state_restoration(tmp_path):
|
||||
original_uuid = flow1.state.id
|
||||
|
||||
# Test case 1: Restore by id through kickoff inputs, with a field override
|
||||
flow2 = RestorableFlow(
|
||||
persistence=persistence,
|
||||
restore_uuid=original_uuid,
|
||||
counter=43, # Override counter
|
||||
)
|
||||
flow2 = RestorableFlow(persistence=persistence)
|
||||
flow2.kickoff(inputs={
|
||||
"id": original_uuid,
|
||||
"counter": 43
|
||||
})
|
||||
|
||||
# Verify state restoration and selective field override
|
||||
assert flow2.state.id == original_uuid
|
||||
@@ -99,48 +100,17 @@ def test_flow_state_restoration(tmp_path):
|
||||
assert flow2.state.counter == 43 # Overridden
|
||||
|
||||
# Test case 2: Restore by passing the id through kickoff inputs
|
||||
flow3 = RestorableFlow(
|
||||
persistence=persistence,
|
||||
id=original_uuid,
|
||||
message="Updated message", # Override message
|
||||
)
|
||||
flow3 = RestorableFlow(persistence=persistence)
|
||||
flow3.kickoff(inputs={
|
||||
"id": original_uuid,
|
||||
"message": "Updated message"
|
||||
})
|
||||
|
||||
# Verify state restoration and selective field override
|
||||
assert flow3.state.id == original_uuid
|
||||
assert flow3.state.counter == 42 # Preserved
|
||||
assert flow3.state.counter == 43  # Preserved from the state persisted at the end of flow2's run
|
||||
assert flow3.state.message == "Updated message" # Overridden
|
||||
|
||||
# Test case 3: Verify error on conflicting IDs
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
RestorableFlow(
|
||||
persistence=persistence,
|
||||
restore_uuid=original_uuid,
|
||||
id="different-id", # Conflict with restore_uuid
|
||||
)
|
||||
assert "Conflicting IDs provided" in str(exc_info.value)
|
||||
|
||||
# Test case 4: Verify error on non-existent restore_uuid
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
RestorableFlow(
|
||||
persistence=persistence,
|
||||
restore_uuid="non-existent-uuid",
|
||||
)
|
||||
assert "No state found" in str(exc_info.value)
|
||||
|
||||
# Test case 5: Allow new state creation with kwargs['id']
|
||||
new_uuid = "new-flow-id"
|
||||
flow4 = RestorableFlow(
|
||||
persistence=persistence,
|
||||
id=new_uuid,
|
||||
message="New message",
|
||||
counter=100,
|
||||
)
|
||||
|
||||
# Verify new state creation with provided ID
|
||||
assert flow4.state.id == new_uuid
|
||||
assert flow4.state.message == "New message"
|
||||
assert flow4.state.counter == 100
|
||||
|
||||
|
||||
def test_multiple_method_persistence(tmp_path):
|
||||
"""Test state persistence across multiple method executions."""
|
||||
@@ -148,48 +118,59 @@ def test_multiple_method_persistence(tmp_path):
|
||||
persistence = SQLiteFlowPersistence(db_path)
|
||||
|
||||
class MultiStepFlow(Flow[TestState]):
|
||||
initial_state = TestState
|
||||
|
||||
@start()
|
||||
@persist(persistence)
|
||||
def step_1(self):
|
||||
self.state.counter = 1
|
||||
self.state.message = "Step 1"
|
||||
if self.state.counter == 1:
|
||||
self.state.counter = 99999
|
||||
self.state.message = "Step 99999"
|
||||
else:
|
||||
self.state.counter = 1
|
||||
self.state.message = "Step 1"
|
||||
|
||||
@start()
|
||||
@listen(step_1)
|
||||
@persist(persistence)
|
||||
def step_2(self):
|
||||
self.state.counter = 2
|
||||
self.state.message = "Step 2"
|
||||
if self.state.counter == 1:
|
||||
self.state.counter = 2
|
||||
self.state.message = "Step 2"
|
||||
|
||||
flow = MultiStepFlow(persistence=persistence)
|
||||
flow.kickoff()
|
||||
|
||||
flow2 = MultiStepFlow(persistence=persistence)
|
||||
flow2.kickoff(inputs={"id": flow.state.id})
|
||||
|
||||
# Load final state
|
||||
final_state = persistence.load_state(flow.state.id)
|
||||
final_state = flow2.state
|
||||
assert final_state is not None
|
||||
assert final_state["counter"] == 2
|
||||
assert final_state["message"] == "Step 2"
|
||||
|
||||
|
||||
def test_persistence_error_handling(tmp_path):
|
||||
"""Test error handling in persistence operations."""
|
||||
db_path = os.path.join(tmp_path, "test_flows.db")
|
||||
persistence = SQLiteFlowPersistence(db_path)
|
||||
|
||||
class InvalidFlow(Flow[TestState]):
|
||||
# Missing id field in initial state
|
||||
class InvalidState(BaseModel):
|
||||
value: str = ""
|
||||
|
||||
initial_state = InvalidState
|
||||
assert final_state.counter == 2
|
||||
assert final_state.message == "Step 2"
|
||||
|
||||
class NoPersistenceMultiStepFlow(Flow[TestState]):
|
||||
@start()
|
||||
@persist(persistence)
|
||||
def will_fail(self):
|
||||
self.state.value = "test"
|
||||
def step_1(self):
|
||||
if self.state.counter == 1:
|
||||
self.state.counter = 99999
|
||||
self.state.message = "Step 99999"
|
||||
else:
|
||||
self.state.counter = 1
|
||||
self.state.message = "Step 1"
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
flow = InvalidFlow(persistence=persistence)
|
||||
@listen(step_1)
|
||||
def step_2(self):
|
||||
if self.state.counter == 1:
|
||||
self.state.counter = 2
|
||||
self.state.message = "Step 2"
|
||||
|
||||
assert "must have an 'id' field" in str(exc_info.value)
|
||||
flow = NoPersistenceMultiStepFlow(persistence=persistence)
|
||||
flow.kickoff()
|
||||
|
||||
flow2 = NoPersistenceMultiStepFlow(persistence=persistence)
|
||||
flow2.kickoff(inputs={"id": flow.state.id})
|
||||
|
||||
# Load final state
|
||||
final_state = flow2.state
|
||||
assert final_state.counter == 99999
|
||||
assert final_state.message == "Step 99999"
|