Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-01 04:08:30 +00:00

Compare commits: `fix/tests-...` → `feature/re...`
48 commits (SHA1):

92fca9bbe9, 5c04c63127, 1a44a34c17, 363ce5e9ce, 10b84955ad, 691b094a40,
68e9e54c88, d0d99125c4, 129000d01f, 47f9d026dd, b75b0b5552, 3dd6249f1e,
8451113039, a79b216875, 52217c2f63, 7edacf6e24, 58558a1950, 1607c85ae5,
a6ff342948, d2eb54ebf8, a41bd18599, bb64c80964, a3bdc09f2d, bae9c70730,
55af7e0f15, e745094d73, 053d8a0449, 0bfa549477, 5334e9e585, 68de393534,
f36f73e035, 1f9166f61b, 5a5276eb5d, 60c8f86345, 6a47eb4f9e, 2efe16eac9,
1d2827e9a5, 5091712a2d, 764234c426, be0a4c2fe5, cc1c97e87d, 5775ed3fcb,
5f820cedcc, f86e4a1990, ee4a996de3, 5c504f4087, 26489ced1a, ea5a784877
.github/workflows/tests.yml (vendored, 4 changes)

```diff
@@ -19,7 +19,7 @@ jobs:
       - name: Setup Python
         uses: actions/setup-python@v4
         with:
-          python-version: "3.10"
+          python-version: "3.11.9"

       - name: Install Requirements
         run: |
@@ -28,4 +28,4 @@ jobs:
           poetry install

       - name: Run tests
-        run: poetry run pytest tests
+        run: poetry run pytest
```
```diff
@@ -12,7 +12,7 @@ description: Leveraging memory systems in the crewAI framework to enhance agent

 | Component | Description |
 | :------------------- | :----------------------------------------------------------- |
 | **Short-Term Memory**| Temporarily stores recent interactions and outcomes, enabling agents to recall and utilize information relevant to their current context during the current executions. |
-| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. So Agents can remeber what they did right and wrong across multiple executions |
+| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. So Agents can remember what they did right and wrong across multiple executions |
 | **Entity Memory**    | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. |
 | **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
```
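For orientation, a minimal sketch of how these memory components are enabled in code; `memory=True` is the documented switch on `Crew`, and the agent/task details below are placeholders:

```python
from crewai import Agent, Crew, Process, Task

# Minimal sketch: with memory=True the crew wires up short-term, long-term,
# and entity memory for you, combined as contextual memory at runtime.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An analyst who remembers prior runs.",
)

task = Task(
    description="Summarize the latest findings on agent memory.",
    expected_output="A short summary.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential, memory=True)
result = crew.kickoff()
```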
```diff
@@ -51,7 +51,7 @@ To optimize tool performance with caching, define custom caching strategies usin

 @tool("Tool with Caching")
 def cached_tool(argument: str) -> str:
     """Tool functionality description."""
-    return "Cachable result"
+    return "Cacheable result"

 def my_cache_strategy(arguments: dict, result: str) -> bool:
     # Define custom caching logic
```
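For context, here is the pattern this hunk touches as a self-contained sketch; `cache_function` is the hook crewAI uses to attach a custom strategy, and the specific caching rule below is illustrative:

```python
from crewai_tools import tool

@tool("Tool with Caching")
def cached_tool(argument: str) -> str:
    """Tool functionality description."""
    return "Cacheable result"

def my_cache_strategy(arguments: dict, result: str) -> bool:
    # Illustrative policy: only cache calls made with this argument value.
    return arguments.get("argument") == "cache-me"

# Attach the strategy; the crew consults it before reusing a cached result.
cached_tool.cache_function = my_cache_strategy
```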
```diff
@@ -79,5 +79,4 @@ manager = Agent(

 1. `allow_code_execution`: Enable or disable code execution capabilities for the agent (default is False).
 2. `max_execution_time`: Set a maximum execution time (in seconds) for the agent to complete a task.
-3. `function_calling_llm`: Specify a separate language model for function calling.
-4
+3. `function_calling_llm`: Specify a separate language model for function calling.
```
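A hedged sketch combining the three attributes above on one agent; the role strings are placeholders and the LangChain `ChatOpenAI` model is an assumed choice of function-calling LLM:

```python
from crewai import Agent
from langchain_openai import ChatOpenAI  # assumed LLM provider for illustration

coding_manager = Agent(
    role="Engineering Manager",
    goal="Review and run generated code safely",
    backstory="Oversees code-producing agents.",
    allow_code_execution=True,    # default is False
    max_execution_time=300,       # seconds allowed per task
    function_calling_llm=ChatOpenAI(model="gpt-3.5-turbo"),  # separate LLM just for tool calls
)
```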
docs/how-to/Force-Tool-Ouput-as-Result.md (new file, 31 lines)

@@ -0,0 +1,31 @@

---
title: Forcing Tool Output as Result
description: Learn how to force tool output as the result of an agent's task in crewAI.
---

## Introduction
In CrewAI, you can force the output of a tool to be the result of an agent's task. This feature is useful when you want to ensure that the tool output is captured and returned as the task result, preventing the agent from modifying it during task execution.

## Forcing Tool Output as Result
To force the tool output as the result of an agent's task, set the `result_as_answer` parameter to `True` when adding the tool to the agent. This parameter ensures that the tool output is captured and returned as the task result, without any modifications by the agent.

Here's an example of how to force the tool output as the result of an agent's task:

```python
# ...
# Define a custom tool that returns the result as the answer
coding_agent = Agent(
    role="Data Scientist",
    goal="Produce amazing reports on AI",
    backstory="You work with data and AI",
    tools=[MyCustomTool(result_as_answer=True)],
)
# ...
```

### Workflow in Action

1. **Task Execution**: The agent executes the task using the tool provided.
2. **Tool Output**: The tool generates the output, which is captured as the task result.
3. **Agent Interaction**: The agent may reflect on and learn from the tool output, but the output itself is not modified.
4. **Result Return**: The tool output is returned as the task result without any modifications.
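The example above assumes a `MyCustomTool` defined elsewhere. A hypothetical minimal definition, subclassing `BaseTool` from `crewai_tools` (tool name, description, and return value are illustrative):

```python
from crewai_tools import BaseTool

class MyCustomTool(BaseTool):
    name: str = "Report Fetcher"
    description: str = "Fetches a pre-built report that must be returned verbatim."

    def _run(self) -> str:
        # Whatever this returns becomes the task's final result when the
        # tool is registered with result_as_answer=True.
        return "AI report: agents improved 20% quarter over quarter."
```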
docs/how-to/Start-a-New-CrewAI-Project.md (new file, 137 lines)

@@ -0,0 +1,137 @@

---
title: Starting a New CrewAI Project
description: A comprehensive guide to starting a new CrewAI project, including the latest updates and project setup methods.
---

# Starting Your CrewAI Project

Welcome to the ultimate guide for starting a new CrewAI project. This document will walk you through the steps to create, customize, and run your CrewAI project, ensuring you have everything you need to get started.

## Prerequisites

We assume you have already installed CrewAI. If not, please refer to the [installation guide](how-to/Installing-CrewAI.md) to install CrewAI and its dependencies.

## Creating a New Project

To create a new project, run the following CLI command:

```shell
$ crewai create my_project
```

This command will create a new project folder with the following structure:

```shell
my_project/
├── .gitignore
├── pyproject.toml
├── README.md
└── src/
    └── my_project/
        ├── __init__.py
        ├── main.py
        ├── crew.py
        ├── tools/
        │   ├── custom_tool.py
        │   └── __init__.py
        └── config/
            ├── agents.yaml
            └── tasks.yaml
```

You can now start developing your project by editing the files in the `src/my_project` folder. The `main.py` file is the entry point of your project, and the `crew.py` file is where you define your agents and tasks.

## Customizing Your Project

To customize your project, you can:

- Modify `src/my_project/config/agents.yaml` to define your agents.
- Modify `src/my_project/config/tasks.yaml` to define your tasks.
- Modify `src/my_project/crew.py` to add your own logic, tools, and specific arguments (see the `crew.py` sketch after the YAML examples below).
- Modify `src/my_project/main.py` to add custom inputs for your agents and tasks.
- Add your environment variables (for example, your LLM API key) into the `.env` file.
### Example: Defining Agents and Tasks

#### agents.yaml

```yaml
researcher:
  role: >
    Job Candidate Researcher
  goal: >
    Find potential candidates for the job
  backstory: >
    You are adept at finding the right candidates by exploring various online
    resources. Your skill in identifying suitable candidates ensures the best
    match for job positions.
```

#### tasks.yaml

```yaml
research_candidates_task:
  description: >
    Conduct thorough research to find potential candidates for the specified job.
    Utilize various online resources and databases to gather a comprehensive list of potential candidates.
    Ensure that the candidates meet the job requirements provided.

    Job Requirements:
    {job_requirements}
  expected_output: >
    A list of 10 potential candidates with their contact information and brief profiles highlighting their suitability.
```
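What `crew.py` looks like varies by CrewAI version; the following is a hedged sketch of the decorator-based template that `crewai create` scaffolds, wiring the YAML entries above to agents and tasks. Names such as `MyProjectCrew` follow this guide's examples, and the generated class may also accept `inputs` in its constructor, as the `main.py` example later in this guide suggests:

```python
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task


@CrewBase
class MyProjectCrew:
    """my_project crew, built from the YAML config files above."""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def researcher(self) -> Agent:
        # Pulls the role/goal/backstory defined under `researcher` in agents.yaml
        return Agent(config=self.agents_config["researcher"], verbose=True)

    @task
    def research_candidates_task(self) -> Task:
        # Pulls the description/expected_output from tasks.yaml
        return Task(
            config=self.tasks_config["research_candidates_task"],
            agent=self.researcher(),
        )

    @crew
    def crew(self) -> Crew:
        # The decorators above register the agents and tasks on the class
        return Crew(agents=self.agents, tasks=self.tasks, process=Process.sequential)
```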
## Installing Dependencies

To install the dependencies for your project, you can use Poetry. First, navigate to your project directory:

```shell
$ cd my_project
$ poetry lock
$ poetry install
```

This will install the dependencies specified in the `pyproject.toml` file.

## Interpolating Variables

Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{variable}` will be replaced by the value of the variable in the `main.py` file. The keys of the `inputs` dictionary must match the placeholder names used in the YAML files.

#### tasks.yaml

```yaml
research_task:
  description: >
    Conduct a thorough research about the customer and competitors in the context
    of {customer_domain}.
    Make sure you find any interesting and relevant information given the
    current year is 2024.
  expected_output: >
    A complete report on the customer and their customers and competitors,
    including their demographics, preferences, market positioning and audience engagement.
```

#### main.py

```python
# main.py
def run():
    inputs = {
        "customer_domain": "crewai.com"
    }
    MyProjectCrew(inputs).crew().kickoff(inputs=inputs)
```

## Running Your Project

To run your project, use the following command:

```shell
$ poetry run my_project
```

This works because the generated `pyproject.toml` registers `my_project` as a Poetry script entry point. Running it will initialize your crew of AI agents and begin task execution as defined in your configuration in the `main.py` file.

## Deploying Your Project

The easiest way to deploy your crew is through [CrewAI+](https://www.crewai.com/crewaiplus), where you can deploy your crew in a few clicks.
```diff
@@ -48,6 +48,11 @@ Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By
     <div style="width:30%">
       <h2>How-To Guides</h2>
       <ul>
+        <li>
+          <a href="./how-to/Start-a-New-CrewAI-Project">
+            Starting Your crewAI Project
+          </a>
+        </li>
         <li>
           <a href="./how-to/Installing-CrewAI">
             Installing crewAI
@@ -88,6 +93,11 @@ Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By
             Coding Agents
           </a>
         </li>
+        <li>
+          <a href="./how-to/Force-Tool-Ouput-as-Result">
+            Forcing Tool Output as Result
+          </a>
+        </li>
         <li>
           <a href="./how-to/Human-Input-on-Execution">
             Human Input on Execution
```
```diff
@@ -4,7 +4,7 @@

 We are still working on improving tools, so there might be unexpected behavior or changes in the future.

 ## Description
-The GithubSearchTool is a Read, Append, and Generate (RAG) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.
+The GithubSearchTool is a Retrieval-Augmented Generation (RAG) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.

 ## Installation
 To use the GithubSearchTool, first ensure the crewai_tools package is installed in your Python environment:
```
````diff
@@ -4,7 +4,7 @@

 The MDXSearchTool is in continuous development. Features may be added or removed, and functionality could change unpredictably as we refine the tool.

 ## Description
-The MDX Search Tool is a component of the `crewai_tools` package aimed at facilitating advanced market data extraction. This tool is invaluable for researchers and analysts seeking quick access to market insights, especially within the AI sector. It simplifies the task of acquiring, interpreting, and organizing market data by interfacing with various data sources.
+The MDX Search Tool is a component of the `crewai_tools` package aimed at facilitating advanced markdown language extraction. It enables users to effectively search and extract relevant information from MD files using query-based searches. This tool is invaluable for data analysis, information management, and research tasks, streamlining the process of finding specific information within large document collections.

 ## Installation
 Before using the MDX Search Tool, ensure the `crewai_tools` package is installed. If it is not, you can install it with the following command:
@@ -59,4 +59,4 @@ tool = MDXSearchTool(
     ),
-)
 )
 ```
-```
````
````diff
@@ -31,7 +31,7 @@ tool = TXTSearchTool(txt='path/to/text/file.txt')
 ```

 ## Arguments
-- `txt` (str): **Optinal**. The path to the text file you want to search. This argument is only required if the tool was not initialized with a specific text file; otherwise, the search will be conducted within the initially provided text file.
+- `txt` (str): **Optional**. The path to the text file you want to search. This argument is only required if the tool was not initialized with a specific text file; otherwise, the search will be conducted within the initially provided text file.

 ## Custom model and embeddings
````
```diff
@@ -131,6 +131,7 @@ nav:
     - Using LangChain Tools: 'core-concepts/Using-LangChain-Tools.md'
     - Using LlamaIndex Tools: 'core-concepts/Using-LlamaIndex-Tools.md'
   - How to Guides:
+    - Starting Your crewAI Project: 'how-to/Start-a-New-CrewAI-Project.md'
     - Installing CrewAI: 'how-to/Installing-CrewAI.md'
     - Getting Started: 'how-to/Creating-a-Crew-and-kick-it-off.md'
     - Create Custom Tools: 'how-to/Create-Custom-Tools.md'
@@ -140,6 +141,7 @@ nav:
     - Connecting to any LLM: 'how-to/LLM-Connections.md'
     - Customizing Agents: 'how-to/Customizing-Agents.md'
     - Coding Agents: 'how-to/Coding-Agents.md'
+    - Forcing Tool Output as Result: 'how-to/Force-Tool-Ouput-as-Result.md'
     - Human Input on Execution: 'how-to/Human-Input-on-Execution.md'
     - Kickoff a Crew Asynchronously: 'how-to/Kickoff-async.md'
     - Kickoff a Crew for a List: 'how-to/Kickoff-for-each.md'
```
poetry.lock (generated, 153 changes)

```diff
@@ -343,17 +343,17 @@ lxml = ["lxml"]

 [[package]]
 name = "boto3"
-version = "1.34.139"
+version = "1.34.140"
 description = "The AWS SDK for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "boto3-1.34.139-py3-none-any.whl", hash = "sha256:98b2a12bcb30e679fa9f60fc74145a39db5ec2ca7b7c763f42896e3bd9b3a38d"},
-    {file = "boto3-1.34.139.tar.gz", hash = "sha256:32b99f0d76ec81fdca287ace2c9744a2eb8b92cb62bf4d26d52a4f516b63a6bf"},
+    {file = "boto3-1.34.140-py3-none-any.whl", hash = "sha256:23ca8d8f7a30c3bbd989808056b5fc5d68ff5121c02c722c6167b6b1bb7f8726"},
+    {file = "boto3-1.34.140.tar.gz", hash = "sha256:578bbd5e356005719b6b610d03edff7ea1b0824d078afe62d3fb8bea72f83a87"},
 ]

 [package.dependencies]
-botocore = ">=1.34.139,<1.35.0"
+botocore = ">=1.34.140,<1.35.0"
 jmespath = ">=0.7.1,<2.0.0"
 s3transfer = ">=0.10.0,<0.11.0"

@@ -362,13 +362,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]

 [[package]]
 name = "botocore"
-version = "1.34.139"
+version = "1.34.140"
 description = "Low-level, data-driven core of boto 3."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "botocore-1.34.139-py3-none-any.whl", hash = "sha256:dd1e085d4caa2a4c1b7d83e3bc51416111c8238a35d498e9d3b04f3b63b086ba"},
-    {file = "botocore-1.34.139.tar.gz", hash = "sha256:df023d8cf8999d574214dad4645cb90f9d2ccd1494f6ee2b57b1ab7522f6be77"},
+    {file = "botocore-1.34.140-py3-none-any.whl", hash = "sha256:43940d3a67d946ba3301631ba4078476a75f1015d4fb0fb0272d0b754b2cf9de"},
+    {file = "botocore-1.34.140.tar.gz", hash = "sha256:86302b2226c743b9eec7915a4c6cfaffd338ae03989cd9ee181078ef39d1ab39"},
 ]

 [package.dependencies]
@@ -747,13 +747,13 @@ all = ["pycocotools (==2.0.6)"]

 [[package]]
 name = "clarifai-grpc"
-version = "10.5.4"
+version = "10.6.1"
 description = "Clarifai gRPC API Client"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "clarifai_grpc-10.5.4-py3-none-any.whl", hash = "sha256:ae4c4d8985fdd2bf326cec27ee834571e44d0e989fb12686dd681f9b553ae218"},
-    {file = "clarifai_grpc-10.5.4.tar.gz", hash = "sha256:c67ce0dde186e8bab0d42a9923d28ddb4a05017b826c8e52ac7a86ec6df5f12a"},
+    {file = "clarifai_grpc-10.6.1-py3-none-any.whl", hash = "sha256:7f07c262f46042995b11af10cdd552718c4487e955db1b3f1253fcb0c2ab1ce1"},
+    {file = "clarifai_grpc-10.6.1.tar.gz", hash = "sha256:f692e3d6a051a1228ca371c3a9dc705cc9a61334eecc454d056f7af0b6f4dbad"},
 ]

 [package.dependencies]
@@ -840,13 +840,13 @@ files = [

 [[package]]
 name = "crewai-tools"
-version = "0.4.7"
+version = "0.4.8"
 description = "Set of tools for the crewAI framework"
 optional = false
 python-versions = "<=3.13,>=3.10"
 files = [
-    {file = "crewai_tools-0.4.7-py3-none-any.whl", hash = "sha256:3ff04b2da07d2c48e72f898511295b4a10038dd3e4fe859baa93fec1fb8baf8e"},
-    {file = "crewai_tools-0.4.7.tar.gz", hash = "sha256:4502a5e0ab94a7dae6638d000768f80049918909ca5338cdebc280351b3ce003"},
+    {file = "crewai_tools-0.4.8-py3-none-any.whl", hash = "sha256:628b08515ee0e06c751da1dd66b0cff70c9b2644775891c8f59883cb5debfef4"},
+    {file = "crewai_tools-0.4.8.tar.gz", hash = "sha256:ae190bd187f980163523c86ee7e1eb2ed78896f935d6caff98908dd7ab6c982b"},
 ]

 [package.dependencies]
@@ -884,6 +884,21 @@ webencodings = "*"
 doc = ["sphinx", "sphinx_rtd_theme"]
 test = ["flake8", "isort", "pytest"]

+[[package]]
+name = "dataclasses-json"
+version = "0.6.7"
+description = "Easily serialize dataclasses to and from JSON."
+optional = false
+python-versions = "<4.0,>=3.7"
+files = [
+    {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"},
+    {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"},
+]
+
+[package.dependencies]
+marshmallow = ">=3.18.0,<4.0.0"
+typing-inspect = ">=0.4.0,<1"
+
 [[package]]
 name = "decorator"
 version = "5.1.1"
@@ -1039,13 +1054,13 @@ idna = ">=2.0.0"

 [[package]]
 name = "embedchain"
-version = "0.1.114"
+version = "0.1.116"
 description = "Simplest open source retrieval (RAG) framework"
 optional = false
 python-versions = "<=3.13,>=3.9"
 files = [
-    {file = "embedchain-0.1.114-py3-none-any.whl", hash = "sha256:ce1b16196bcf53c679cacead0551a5466c33a9080a82be63f973e4437b0823ca"},
-    {file = "embedchain-0.1.114.tar.gz", hash = "sha256:fa5c4a29dd3c6b1137c772e1bc3e2d7ca489c58f46f4c7f7de133b3b9fc56e72"},
+    {file = "embedchain-0.1.116-py3-none-any.whl", hash = "sha256:388835d047f9ff4542ebf50e3fa633ef596db262cbe506195ee4976b91a49172"},
+    {file = "embedchain-0.1.116.tar.gz", hash = "sha256:3e4d6418df2e749c2bd3cd3153c3857cbecd7227afe40b87d5ac3df629c394b2"},
 ]

 [package.dependencies]
@@ -1053,11 +1068,14 @@ alembic = ">=1.13.1,<2.0.0"
 beautifulsoup4 = ">=4.12.2,<5.0.0"
 chromadb = ">=0.4.24,<0.5.0"
 clarifai = ">=10.0.1,<11.0.0"
 cohere = ">=5.3,<6.0"
 google-cloud-aiplatform = ">=1.26.1,<2.0.0"
 gptcache = ">=0.1.43,<0.2.0"
 langchain = ">0.2,<=0.3"
 langchain-cohere = ">=0.1.4,<0.2.0"
 langchain-community = ">=0.2.6,<0.3.0"
 langchain-openai = ">=0.1.7,<0.2.0"
 memzero = ">=0.0.7,<0.0.8"
 openai = ">=1.1.1"
 posthog = ">=3.0.2,<4.0.0"
 pypdf = ">=4.0.1,<5.0.0"
@@ -1070,7 +1088,6 @@ tiktoken = ">=0.7.0,<0.8.0"

 [package.extras]
 aws-bedrock = ["boto3 (>=1.34.20,<2.0.0)"]
 cohere = ["cohere (>=5.3,<6.0)"]
 dataloaders = ["docx2txt (>=0.8,<0.9)", "duckduckgo-search (>=6.1.5,<7.0.0)", "pytube (>=15.0.0,<16.0.0)", "sentence-transformers (>=2.2.2,<3.0.0)", "youtube-transcript-api (>=0.6.1,<0.7.0)"]
 discord = ["discord (>=2.3.2,<3.0.0)"]
 dropbox = ["dropbox (>=11.36.2,<12.0.0)"]

@@ -2035,13 +2052,13 @@ pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_ve

 [[package]]
 name = "identify"
-version = "2.5.36"
+version = "2.6.0"
 description = "File identification library for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "identify-2.5.36-py2.py3-none-any.whl", hash = "sha256:37d93f380f4de590500d9dba7db359d0d3da95ffe7f9de1753faa159e71e7dfa"},
-    {file = "identify-2.5.36.tar.gz", hash = "sha256:e5e00f54165f9047fbebeb4a560f9acfb8af4c88232be60a488e9b68d122745d"},
+    {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"},
+    {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"},
 ]

 [package.extras]
@@ -2402,6 +2419,32 @@ files = [
 cohere = ">=5.5.6,<6.0"
 langchain-core = ">=0.2.0,<0.3"

+[[package]]
+name = "langchain-community"
+version = "0.2.6"
+description = "Community contributed LangChain integrations."
+optional = false
+python-versions = "<4.0,>=3.8.1"
+files = [
+    {file = "langchain_community-0.2.6-py3-none-any.whl", hash = "sha256:758cc800acfe5dd396bf8ba1b57c4792639ead0eab48ed0367f0732ec6ee1f68"},
+    {file = "langchain_community-0.2.6.tar.gz", hash = "sha256:40ce09a50ed798aa651ddb34c8978200fa8589b9813c7a28ce8af027bbf249f0"},
+]
+
+[package.dependencies]
+aiohttp = ">=3.8.3,<4.0.0"
+dataclasses-json = ">=0.5.7,<0.7"
+langchain = ">=0.2.6,<0.3.0"
+langchain-core = ">=0.2.10,<0.3.0"
+langsmith = ">=0.1.0,<0.2.0"
+numpy = [
+    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
+    {version = ">=1,<2", markers = "python_version < \"3.12\""},
+]
+PyYAML = ">=5.3"
+requests = ">=2,<3"
+SQLAlchemy = ">=1.4,<3"
+tenacity = ">=8.1.0,<8.4.0 || >8.4.0,<9.0.0"
+
 [[package]]
 name = "langchain-core"
 version = "0.2.11"
@@ -2456,13 +2499,13 @@ langchain-core = ">=0.2.10,<0.3.0"

 [[package]]
 name = "langsmith"
-version = "0.1.83"
+version = "0.1.84"
 description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
 optional = false
 python-versions = "<4.0,>=3.8.1"
 files = [
-    {file = "langsmith-0.1.83-py3-none-any.whl", hash = "sha256:f54d8cd8479b648b6339f3f735d19292c3516d080f680933ecdca3eab4b67ed3"},
-    {file = "langsmith-0.1.83.tar.gz", hash = "sha256:5cdd947212c8ad19adb992c06471c860185a777daa6859bb47150f90daf64bf3"},
+    {file = "langsmith-0.1.84-py3-none-any.whl", hash = "sha256:01f3c6390dba26c583bac8dd0e551ce3d0509c7f55cad714db0b5c8d36e4c7ff"},
+    {file = "langsmith-0.1.84.tar.gz", hash = "sha256:5220c0439838b9a5bd320fd3686be505c5083dcee22d2452006c23891153bea1"},
 ]

 [package.dependencies]
@@ -2600,6 +2643,25 @@ files = [
 {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
 ]

+[[package]]
+name = "marshmallow"
+version = "3.21.3"
+description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"},
+    {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"},
+]
+
+[package.dependencies]
+packaging = ">=17.0"
+
+[package.extras]
+dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"]
+docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"]
+tests = ["pytest", "pytz", "simplejson"]
+
 [[package]]
 name = "mdurl"
 version = "0.1.2"
@@ -2611,6 +2673,22 @@ files = [
 {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
 ]

+[[package]]
+name = "memzero"
+version = "0.0.7"
+description = "Long-term memory for AI Agents"
+optional = false
+python-versions = "<4.0,>=3.9"
+files = [
+    {file = "memzero-0.0.7-py3-none-any.whl", hash = "sha256:65f6da88d46263dbc05621fcd01bd09616d0e7f082d55ed9899dc2152491ffd2"},
+    {file = "memzero-0.0.7.tar.gz", hash = "sha256:0c1f413d8ee0ade955fe9f8b8f5aff2cf58bc94869537aca62139db3d9f50725"},
+]
+
+[package.dependencies]
+httpx = ">=0.27.0,<0.28.0"
+posthog = ">=3.5.0,<4.0.0"
+pydantic = ">=2.7.3,<3.0.0"
+
 [[package]]
 name = "mergedeep"
 version = "1.3.4"
@@ -4972,13 +5050,13 @@ widechars = ["wcwidth"]

 [[package]]
 name = "tenacity"
-version = "8.4.2"
+version = "8.5.0"
 description = "Retry code until it succeeds"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "tenacity-8.4.2-py3-none-any.whl", hash = "sha256:9e6f7cf7da729125c7437222f8a522279751cdfbe6b67bfe64f75d3a348661b2"},
-    {file = "tenacity-8.4.2.tar.gz", hash = "sha256:cd80a53a79336edba8489e767f729e4f391c896956b57140b5d7511a64bbd3ef"},
+    {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"},
+    {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"},
 ]

 [package.extras]
@@ -5205,13 +5283,13 @@ telegram = ["requests"]

 [[package]]
 name = "trio"
-version = "0.25.1"
+version = "0.26.0"
 description = "A friendly Python library for async concurrency and I/O"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "trio-0.25.1-py3-none-any.whl", hash = "sha256:e42617ba091e7b2e50c899052e83a3c403101841de925187f61e7b7eaebdf3fb"},
-    {file = "trio-0.25.1.tar.gz", hash = "sha256:9f5314f014ea3af489e77b001861c535005c3858d38ec46b6b071ebfa339d7fb"},
+    {file = "trio-0.26.0-py3-none-any.whl", hash = "sha256:bb9c1b259591af941fccfbabbdc65bc7ed764bd2db76428454c894cd5e3d2032"},
+    {file = "trio-0.26.0.tar.gz", hash = "sha256:67c5ec3265dd4abc7b1d1ab9ca4fe4c25b896f9c93dac73713778adab487f9c4"},
 ]

 [package.dependencies]
@@ -5314,6 +5392,21 @@ files = [
 {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
 ]

+[[package]]
+name = "typing-inspect"
+version = "0.9.0"
+description = "Runtime inspection utilities for typing module."
+optional = false
+python-versions = "*"
+files = [
+    {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"},
+    {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=0.3.0"
+typing-extensions = ">=3.7.4"
+
 [[package]]
 name = "ujson"
 version = "5.10.0"
@@ -5997,4 +6090,4 @@ tools = ["crewai-tools"]

 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.10,<=3.13"
-content-hash = "4f3e5fddb5f0fc8fd143a8abe947ecac443213d595bd0eeed745ccb82dac2312"
+content-hash = "0dbf6f6e2e841fb3eec4ff87ea5d6b430f29702118fee91307983c6b2581e59e"
```
```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "crewai"
-version = "0.35.8"
+version = "0.36.0"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 authors = ["Joao Moura <joao@crewai.com>"]
 readme = "README.md"
@@ -21,7 +21,7 @@ opentelemetry-sdk = "^1.22.0"
 opentelemetry-exporter-otlp-proto-http = "^1.22.0"
 instructor = "1.3.3"
 regex = "^2023.12.25"
-crewai-tools = { version = "^0.4.7", optional = true }
+crewai-tools = { version = "^0.4.8", optional = true }
 click = "^8.1.7"
 python-dotenv = "^1.0.0"
 appdirs = "^1.4.4"
@@ -45,7 +45,7 @@ mkdocs-material = { extras = ["imaging"], version = "^9.5.7" }
 mkdocs-material-extensions = "^1.3.1"
 pillow = "^10.2.0"
 cairosvg = "^2.7.1"
-crewai-tools = "^0.4.7"
+crewai-tools = "^0.4.8"

 [tool.poetry.group.test.dependencies]
 pytest = "^8.0.0"
```
```diff
@@ -20,7 +20,7 @@ from crewai.utilities.training_handler import CrewTrainingHandler

 agentops = None
 try:
-    import agentops
+    import agentops  # type: ignore # Name "agentops" already defined on line 21
     from agentops import track_agent
 except ImportError:

@@ -60,8 +60,8 @@ class Agent(BaseAgent):
         default=None,
         description="Maximum execution time for an agent to execute a task",
     )
-    agent_ops_agent_name: str = None
-    agent_ops_agent_id: str = None
+    agent_ops_agent_name: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
+    agent_ops_agent_id: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
     cache_handler: InstanceOf[CacheHandler] = Field(
         default=None, description="An instance of the CacheHandler class."
     )
@@ -148,8 +148,7 @@ class Agent(BaseAgent):
             Output of the agent
         """
         if self.tools_handler:
-            # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
-            self.tools_handler.last_used_tool = {}
+            self.tools_handler.last_used_tool = {}  # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")

         task_prompt = task.prompt()

@@ -169,8 +168,8 @@ class Agent(BaseAgent):
             task_prompt += self.i18n.slice("memory").format(memory=memory)

         tools = tools or self.tools
-        # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
-        parsed_tools = self._parse_tools(tools or [])
-
+        parsed_tools = self._parse_tools(tools or [])  # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
         self.create_agent_executor(tools=tools)
         self.agent_executor.tools = parsed_tools
         self.agent_executor.task = task
@@ -196,7 +195,7 @@ class Agent(BaseAgent):
         # If there was any tool in self.tools_results that had result_as_answer
         # set to True, return the results of the last tool that had
         # result_as_answer set to True
-        for tool_result in self.tools_results:
+        for tool_result in self.tools_results:  # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
             if tool_result.get("result_as_answer", False):
                 result = tool_result["result"]

@@ -300,7 +299,7 @@ class Agent(BaseAgent):
     def get_output_converter(self, llm, text, model, instructions):
         return Converter(llm=llm, text=text, model=model, instructions=instructions)

-    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
+    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:  # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
         """Parse tools to be used for the task."""
         tools_list = []
         try:

@@ -191,7 +191,7 @@ class BaseAgent(ABC, BaseModel):
         """Get the converter class for the agent to create json/pydantic outputs."""
         pass

-    def copy(self: T) -> T:
+    def copy(self: T) -> T:  # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
         """Create a deep copy of the Agent."""
         exclude = {
             "id",
```
```diff
@@ -1,6 +1,8 @@
 from abc import ABC, abstractmethod
 from typing import List, Optional, Union

 from pydantic import BaseModel, Field

+from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.task import Task
 from crewai.utilities import I18N
@@ -53,7 +55,7 @@ class BaseAgentTools(BaseModel, ABC):
         # {"task": "....", "coworker": "...."}
         agent_name = agent.casefold().replace('"', "").replace("\n", "")

-        agent = [
+        agent = [  # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
             available_agent
             for available_agent in self.agents
             if available_agent.role.casefold().replace("\n", "") == agent_name
@@ -73,9 +75,9 @@ class BaseAgentTools(BaseModel, ABC):
         )

         agent = agent[0]
-        task = Task(
+        task = Task(  # type: ignore # Incompatible types in assignment (expression has type "Task", variable has type "str")
             description=task,
             agent=agent,
             expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
         )
-        return agent.execute_task(task, context)
+        return agent.execute_task(task, context)  # type: ignore # "str" has no attribute "execute_task"
```
```diff
@@ -1,7 +1,6 @@
 from abc import ABC, abstractmethod
 from typing import Any, Optional
-
 from pydantic import BaseModel, Field, PrivateAttr

@@ -28,7 +27,7 @@ class OutputConverter(BaseModel, ABC):
     model: Any = Field(description="The model to be used to convert the text.")
     instructions: str = Field(description="Conversion instructions to the LLM.")
     max_attempts: Optional[int] = Field(
-        description="Max number of attemps to try to get the output formated.",
+        description="Max number of attempts to try to get the output formatted.",
         default=3,
     )

@@ -42,7 +41,7 @@ class OutputConverter(BaseModel, ABC):
         """Convert text to json."""
         pass

-    @abstractmethod
-    def _is_gpt(self, llm):
+    @abstractmethod  # type: ignore # Name "_is_gpt" already defined on line 25
+    def _is_gpt(self, llm):  # type: ignore # Name "_is_gpt" already defined on line 25
         """Return if llm provided is of gpt from openai."""
         pass
```
```diff
@@ -15,19 +15,18 @@ from langchain.agents.agent import ExceptionTool
 from langchain.callbacks.manager import CallbackManagerForChainRun
 from langchain_core.agents import AgentAction, AgentFinish, AgentStep
 from langchain_core.exceptions import OutputParserException
-
 from langchain_core.tools import BaseTool
 from langchain_core.utils.input import get_color_mapping
 from pydantic import InstanceOf

 from crewai.agents.agent_builder.base_agent_executor_mixin import (
     CrewAgentExecutorMixin,
 )

 from crewai.agents.tools_handler import ToolsHandler
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
-from crewai.utilities import I18N
 from crewai.utilities.constants import TRAINING_DATA_FILE
 from crewai.utilities.training_handler import CrewTrainingHandler
+from crewai.utilities import I18N


 class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
@@ -46,7 +45,7 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
     tools_handler: Optional[InstanceOf[ToolsHandler]] = None
     max_iterations: Optional[int] = 15
     have_forced_answer: bool = False
-    force_answer_max_iterations: Optional[int] = None
+    force_answer_max_iterations: Optional[int] = None  # type: ignore # Incompatible types in assignment (expression has type "int | None", base class "CrewAgentExecutorMixin" defined the type as "int")
     step_callback: Optional[Any] = None
     system_template: Optional[str] = None
     prompt_template: Optional[str] = None

@@ -12,4 +12,4 @@ reporting_task:
     Make sure the report is detailed and contains any and all relevant information.
   expected_output: >
     A fully fledge reports with the mains topics, each with a full section of information.
-    Formated as markdown with out '```'
+    Formatted as markdown without '```'
```
```diff
@@ -1,6 +1,8 @@
 import asyncio
+import json
 import uuid
+from datetime import datetime
 from concurrent.futures import Future
 from typing import Any, Dict, List, Optional, Tuple, Union

 from langchain_core.callbacks import BaseCallbackHandler
@@ -20,16 +22,21 @@ from pydantic_core import PydanticCustomError
 from crewai.agent import Agent
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache import CacheHandler
+from crewai.crews.crew_output import CrewOutput
 from crewai.memory.entity.entity_memory import EntityMemory
 from crewai.memory.long_term.long_term_memory import LongTermMemory
 from crewai.memory.short_term.short_term_memory import ShortTermMemory
 from crewai.process import Process
 from crewai.task import Task
+from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
 from crewai.tools.agent_tools import AgentTools
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
+from crewai.utilities.crew_json_encoder import CrewJSONEncoder
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
+from crewai.utilities.file_handler import TaskOutputJsonHandler
+from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs
 from crewai.utilities.training_handler import CrewTrainingHandler

 try:
@@ -57,7 +64,6 @@ class Crew(BaseModel):
         max_rpm: Maximum number of requests per minute for the crew execution to be respected.
         prompt_file: Path to the prompt json file to be used for the crew.
         id: A unique identifier for the crew instance.
-        full_output: Whether the crew should return the full output with all tasks outputs and token usage metrics or just the final output.
         task_callback: Callback to be executed after each task for every agents execution.
         step_callback: Callback to be executed after each step for every agents execution.
         share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
@@ -68,6 +74,7 @@ class Crew(BaseModel):
     _rpm_controller: RPMController = PrivateAttr()
     _logger: Logger = PrivateAttr()
     _file_handler: FileHandler = PrivateAttr()
+    _task_output_handler: TaskOutputJsonHandler = PrivateAttr()
     _cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
     _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
     _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
@@ -93,10 +100,6 @@ class Crew(BaseModel):
         default=None,
         description="Metrics for the LLM usage during all tasks execution.",
     )
-    full_output: Optional[bool] = Field(
-        default=False,
-        description="Whether the crew should return the full output with all tasks outputs and token usage metrics or just the final output.",
-    )
     manager_llm: Optional[Any] = Field(
         description="Language model that will run the agent.", default=None
     )
@@ -133,6 +136,16 @@ class Crew(BaseModel):
         default=False,
         description="output_log_file",
     )
+    task_execution_output_json_files: Optional[List[str]] = Field(
+        default=None,
+        description="List of file paths for task execution JSON files.",
+    )
+    execution_logs: List[Dict[str, Any]] = Field(
+        default=[],
+        description="List of execution logs for tasks",
+    )
+
+    _log_file: str = PrivateAttr(default="crew_tasks_output.json")

     @field_validator("id", mode="before")
     @classmethod
@@ -165,6 +178,7 @@ class Crew(BaseModel):
         self._logger = Logger(self.verbose)
         if self.output_log_file:
             self._file_handler = FileHandler(self.output_log_file)
+        self._task_output_handler = TaskOutputJsonHandler(self._log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
@@ -232,7 +246,7 @@ class Crew(BaseModel):
             if task.agent is None:
                 raise PydanticCustomError(
                     "missing_agent_in_task",
-                    f"Sequential process error: Agent is missing in the task with the following description: {task.description}",  # type: ignore Argument of type "str" cannot be assigned to parameter "message_template" of type "LiteralString"
+                    f"Sequential process error: Agent is missing in the task with the following description: {task.description}",  # type: ignore # Argument of type "str" cannot be assigned to parameter "message_template" of type "LiteralString"
                     {},
                 )

@@ -314,39 +328,40 @@ class Crew(BaseModel):

     def kickoff(
         self,
-        inputs: Optional[Dict[str, Any]] = {},
-    ) -> Union[str, Dict[str, Any]]:
+        inputs: Optional[Dict[str, Any]] = None,
+    ) -> CrewOutput:
         """Starts the crew to work on its assigned tasks."""
         self._execution_span = self._telemetry.crew_execution_span(self, inputs)
-        # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
-        self._interpolate_inputs(inputs)
+        self.execution_logs = []
+        if inputs is not None:
+            self._interpolate_inputs(inputs)
+        # self._interpolate_inputs(inputs)
         self._set_tasks_callbacks()

         i18n = I18N(prompt_file=self.prompt_file)

         for agent in self.agents:
-            # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
             agent.i18n = i18n
-            # type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
             agent.crew = self  # type: ignore[attr-defined]
             # TODO: Create an AgentFunctionCalling protocol for future refactoring
-            if not agent.function_calling_llm:
-                agent.function_calling_llm = self.function_calling_llm
+            if not agent.function_calling_llm:  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
+                agent.function_calling_llm = self.function_calling_llm  # type: ignore # "BaseAgent" has no attribute "function_calling_llm"

-            if agent.allow_code_execution:
-                agent.tools += agent.get_code_execution_tools()
+            if agent.allow_code_execution:  # type: ignore # BaseAgent" has no attribute "allow_code_execution"
+                agent.tools += agent.get_code_execution_tools()  # type: ignore # "BaseAgent" has no attribute "get_code_execution_tools"; maybe "get_delegation_tools"?

-            if not agent.step_callback:
-                agent.step_callback = self.step_callback
+            if not agent.step_callback:  # type: ignore # "BaseAgent" has no attribute "step_callback"
+                agent.step_callback = self.step_callback  # type: ignore # "BaseAgent" has no attribute "step_callback"

             agent.create_agent_executor()

         metrics = []

         if self.process == Process.sequential:
-            result = self._run_sequential_process()
+            result = self._run_sequential_process(inputs)
         elif self.process == Process.hierarchical:
-            result, manager_metrics = self._run_hierarchical_process()
+            result, manager_metrics = self._run_hierarchical_process()  # type: ignore # Incompatible types in assignment (expression has type "str | dict[str, Any]", variable has type "str")
             metrics.append(manager_metrics)
         else:
             raise NotImplementedError(
```
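Taken together, the hunks above change `kickoff` to accept `inputs=None` and to return a `CrewOutput` object instead of a bare string or dict. A hedged sketch of caller-side usage under this branch; the `raw_output` accessor is an assumption, mirrored from the `TaskOutput` fields added elsewhere in this diff:

```python
from crewai import Agent, Crew, Task

agent = Agent(role="Analyst", goal="Answer questions", backstory="Concise and factual.")
task = Task(description="Say hi to {name}.", expected_output="A greeting.", agent=agent)
crew = Crew(agents=[agent], tasks=[task])

# kickoff() now returns a CrewOutput rather than a str/dict.
output = crew.kickoff(inputs={"name": "Ada"})

# Assumed accessor, mirroring the TaskOutput fields in this diff
# (raw_output / pydantic_output / json_output); verify against the release.
print(output.raw_output)
```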
```diff
@@ -360,11 +375,9 @@ class Crew(BaseModel):

         return result

-    def kickoff_for_each(
-        self, inputs: List[Dict[str, Any]]
-    ) -> List[Union[str, Dict[str, Any]]]:
+    def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
         """Executes the Crew's workflow for each input in the list and aggregates results."""
-        results = []
+        results: List[CrewOutput] = []

         # Initialize the parent crew's usage metrics
         total_usage_metrics = {
@@ -389,12 +402,12 @@ class Crew(BaseModel):
         return results

     async def kickoff_async(
-        self, inputs: Optional[Dict[str, Any]] = {}
+        self, inputs: Optional[CrewOutput] = {}
     ) -> Union[str, Dict]:
         """Asynchronous kickoff method to start the crew execution."""
         return await asyncio.to_thread(self.kickoff, inputs)

-    async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[Any]:
+    async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
         crew_copies = [self.copy() for _ in inputs]

         async def run_crew(crew, input_data):
@@ -404,6 +417,10 @@ class Crew(BaseModel):
             asyncio.create_task(run_crew(crew_copies[i], inputs[i]))
             for i in range(len(inputs))
         ]
+        tasks = [
+            asyncio.create_task(run_crew(crew_copies[i], inputs[i]))
+            for i in range(len(inputs))
+        ]

         results = await asyncio.gather(*tasks)
```
@@ -420,14 +437,68 @@ class Crew(BaseModel):
|
||||
|
||||
self.usage_metrics = total_usage_metrics
|
||||
|
||||
total_usage_metrics = {
|
||||
"total_tokens": 0,
|
||||
"prompt_tokens": 0,
|
||||
"completion_tokens": 0,
|
||||
"successful_requests": 0,
|
||||
}
|
||||
for crew in crew_copies:
|
||||
if crew.usage_metrics:
|
||||
for key in total_usage_metrics:
|
||||
total_usage_metrics[key] += crew.usage_metrics.get(key, 0)
|
||||
|
||||
self.usage_metrics = total_usage_metrics
|
||||
|
||||
return results
|
||||
|
||||
def _run_sequential_process(self) -> str:
|
||||
"""Executes tasks sequentially and returns the final output."""
|
||||
task_output = ""
|
||||
def _store_execution_log(self, task, output, task_index, inputs=None):
|
||||
log = {
|
||||
"task_id": str(task.id),
|
||||
"description": task.description,
|
||||
"expected_output": task.expected_output,
|
||||
"agent_role": task.agent.role if task.agent else "None",
|
||||
"output": {
|
||||
"description": task.description,
|
||||
"summary": task.description,
|
||||
"raw_output": output.raw_output,
|
||||
"pydantic_output": output.pydantic_output,
|
||||
"json_output": output.json_output,
|
||||
"agent": task.agent.role if task.agent else "None",
|
||||
},
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"task_index": task_index,
|
||||
# "output_py": output.pydantic_output,
|
||||
"inputs": inputs,
|
||||
# "task": task.model_dump(),
|
||||
}
|
||||
self.execution_logs.append(log)
|
||||
self._task_output_handler.append(log)
|
||||
|
||||
for task in self.tasks:
|
||||
if task.agent.allow_delegation: # type: ignore # Item "None" of "Agent | None" has no attribute "allow_delegation"
|
||||
def _run_sequential_process(
|
||||
self, inputs: Dict[str, Any] | None = None
|
||||
) -> CrewOutput:
|
||||
"""Executes tasks sequentially and returns the final output."""
|
||||
self.execution_logs = []
|
||||
task_outputs = self._execute_tasks(self.tasks, inputs=inputs)
|
||||
final_string_output = aggregate_raw_outputs_from_task_outputs(task_outputs)
|
||||
self._finish_execution(final_string_output)
|
||||
self.save_execution_logs()
|
||||
token_usage = self.calculate_usage_metrics()
|
||||
|
||||
return self._format_output(task_outputs, token_usage)
|
||||
|
||||
def _execute_tasks(
|
||||
self,
|
||||
tasks,
|
||||
start_index=0,
|
||||
is_replay=False,
|
||||
inputs: Dict[str, Any] | None = None,
|
||||
):
|
||||
task_outputs: List[TaskOutput] = []
|
||||
futures: List[Tuple[Task, Future[TaskOutput]]] = []
|
||||
for task_index, task in enumerate(tasks[start_index:], start=start_index):
|
||||
if task.agent and task.agent.allow_delegation:
|
||||
agents_for_delegation = [
|
||||
agent for agent in self.agents if agent != task.agent
|
||||
]
|
||||
@@ -435,38 +506,213 @@ class Crew(BaseModel):
|
||||
task.tools += task.agent.get_delegation_tools(agents_for_delegation)
|
||||
|
||||
role = task.agent.role if task.agent is not None else "None"
|
||||
self._logger.log("debug", f"== Working Agent: {role}", color="bold_purple")
|
||||
log_prefix = "== Replaying from" if is_replay else "=="
|
||||
log_color = "bold_blue" if is_replay else "bold_purple"
|
||||
self._logger.log(
|
||||
"info", f"== Starting Task: {task.description}", color="bold_purple"
|
||||
"debug", f"{log_prefix} Working Agent: {role}", color=log_color
|
||||
)
|
||||
self._logger.log(
|
||||
"info",
|
||||
f"{log_prefix} {'Replaying' if is_replay else 'Starting'} Task: {task.description}",
|
||||
color=log_color,
|
||||
)
|
||||
|
||||
if self.output_log_file:
|
||||
self._file_handler.log(
|
||||
agent=role, task=task.description, status="started"
|
||||
)
|
||||
output = task.execute(context=task_output)
|
||||
|
||||
if not task.async_execution:
|
||||
task_output = output
|
||||
if task.async_execution:
|
||||
context = aggregate_raw_outputs_from_task_outputs(task_outputs)
|
||||
future = task.execute_async(
|
||||
agent=task.agent, context=context, tools=task.tools
|
||||
)
|
||||
futures.append((task, future))
|
||||
else:
|
||||
if futures:
|
||||
task_outputs = self._process_async_tasks(
|
||||
futures, task_index, inputs
|
||||
)
|
||||
futures.clear()
|
||||
|
||||
role = task.agent.role if task.agent is not None else "None"
|
||||
self._logger.log("debug", f"== [{role}] Task output: {task_output}\n\n")
|
||||
context = aggregate_raw_outputs_from_task_outputs(task_outputs)
|
||||
task_output = task.execute_sync(
|
||||
agent=task.agent, context=context, tools=task.tools
|
||||
)
|
||||
task_outputs = [task_output]
|
||||
self._process_task_result(task, task_output)
|
||||
self._store_execution_log(task, task_output, task_index, inputs)
|
||||
|
||||
if self.output_log_file:
|
||||
self._file_handler.log(agent=role, task=task_output, status="completed")
|
||||
if futures:
|
||||
task_outputs = self._process_async_tasks(futures, len(tasks), inputs)
|
||||
|
            self._finish_execution(task_output)

        return task_outputs

    def _process_task_result(self, task: Task, output: TaskOutput) -> None:
        role = task.agent.role if task.agent is not None else "None"
        self._logger.log("debug", f"== [{role}] Task output: {output}\n\n")
        if self.output_log_file:
            self._file_handler.log(agent=role, task=output, status="completed")

    def _process_async_tasks(
        self,
        futures: List[Tuple[Task, Future[TaskOutput]]],
        task_index: int,
        inputs: Dict[str, Any] | None = None,
    ) -> List[TaskOutput]:
        task_outputs = []
        for future_task, future in futures:
            task_output = future.result()
            task_outputs.append(task_output)
            self._process_task_result(future_task, task_output)
            self._store_execution_log(future_task, task_output, task_index, inputs)

        return task_outputs

    def replay_from_task(self, task_id: str):
        stored_outputs = self._load_stored_outputs()
        start_index = next(
            (
                index
                for (index, d) in enumerate(stored_outputs)
                if d["task_id"] == str(task_id)
            ),
            None,
        )
        if start_index is None:
            raise ValueError(f"Task with id {task_id} not found in the crew's tasks.")
        # Create a map of task ID to stored output
        stored_output_map: Dict[str, dict] = {
            log["task_id"]: log["output"] for log in stored_outputs
        }

        task_outputs: List[
            TaskOutput
        ] = []  # propagate the old outputs first to add context, then fill in the new task outputs from the replay point onward
        futures: List[Tuple[Task, Future[TaskOutput]]] = []
        context = ""

        inputs = stored_outputs[start_index].get("inputs", {})
        if inputs is not None:
            self._interpolate_inputs(inputs)
        for task_index, task in enumerate(self.tasks):
            if task_index < start_index:
                # Use stored output for tasks before the replay point
                if task.id in stored_output_map:
                    stored_output = stored_output_map[task.id]
                    task_output = TaskOutput(
                        description=stored_output["description"],
                        raw_output=stored_output["raw_output"],
                        pydantic_output=stored_output["pydantic_output"],
                        json_output=stored_output["json_output"],
                        agent=stored_output["agent"],
                    )
                    task_outputs.append(task_output)
                    context += (
                        f"\nTask {task_index + 1} Output:\n{task_output.raw_output}"
                    )
            else:
                role = task.agent.role if task.agent is not None else "None"
                log_color = "bold_blue"
                self._logger.log(
                    "debug", f"Replaying Working Agent: {role}", color=log_color
                )
                self._logger.log(
                    "info",
                    f"Replaying Task: {task.description}",
                    color=log_color,
                )

                if self.output_log_file:
                    self._file_handler.log(
                        agent=role, task=task.description, status="started"
                    )
                # Execute task for replay and subsequent tasks
                if task.async_execution:
                    future = task.execute_async(
                        agent=task.agent, context=context, tools=task.tools
                    )
                    futures.append((task, future))
                else:
                    if futures:
                        async_outputs = self._process_async_tasks(
                            futures, task_index, inputs
                        )
                        task_outputs.extend(async_outputs)
                        for output in async_outputs:
                            context += (
                                f"\nTask {task_index + 1} Output:\n{output.raw_output}"
                            )
                        futures.clear()
                    task_output = task.execute_sync(
                        agent=task.agent, context=context, tools=task.tools
                    )
                    task_outputs.append(task_output)
                    self._process_task_result(task, task_output)
                    self._store_execution_log(task, task_output, task_index, inputs)
                    context += (
                        f"\nTask {task_index + 1} Output:\n{task_output.raw_output}"
                    )

        # Process any remaining async tasks
        if futures:
            async_outputs = self._process_async_tasks(futures, len(self.tasks), inputs)
            task_outputs.extend(async_outputs)
        # Calculate usage metrics
        token_usage = self.calculate_usage_metrics()

        # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
        return self._format_output(task_output, token_usage)
        # Format and return the final output
        return self._format_output(task_outputs, token_usage)
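As an aside, a minimal sketch of how this replay API might be driven from user code. Only replay_from_task and the log helpers defined below come from this diff; the agent, tasks, and file name are hypothetical:

    from crewai import Agent, Crew, Task

    researcher = Agent(role="Researcher", goal="Research topics", backstory="...")
    tasks = [
        Task(description="Collect sources", expected_output="A list of sources", agent=researcher),
        Task(description="Summarize sources", expected_output="A summary", agent=researcher),
    ]
    crew = Crew(agents=[researcher], tasks=tasks)

    crew.kickoff()                             # logs one entry per executed task
    crew.save_execution_logs("run_logs.json")  # persist them (helper defined below)

    # Later: rerun only from the second task; earlier outputs are rebuilt
    # from the stored log instead of being re-executed.
    crew.load_execution_logs("run_logs.json")
    crew.replay_from_task(task_id=str(tasks[1].id))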

    def _run_hierarchical_process(
        self,
    ) -> Tuple[Union[str, Dict[str, Any]], Dict[str, Any]]:
    def _load_stored_outputs(self) -> List[Dict]:
        try:
            with open(self._log_file, "r") as f:
                return json.load(f)
        except FileNotFoundError:
            self._logger.log(
                "warning",
                f"Log file {self._log_file} not found. Starting with empty logs.",
            )
            return []
        except json.JSONDecodeError:
            self._logger.log(
                "error",
                f"Failed to parse log file {self._log_file}. Starting with empty logs.",
            )
            return []

    def save_execution_logs(self, filename: str | None = None):
        """Save execution logs to a file."""
        if filename:
            self._log_file = filename
        try:
            with open(self._log_file, "w") as f:
                json.dump(self.execution_logs, f, indent=2, cls=CrewJSONEncoder)
        except Exception as e:
            self._logger.log("error", f"Failed to save execution logs: {str(e)}")

    def load_execution_logs(self, filename: str | None = None):
        """Load execution logs from a file."""
        if filename:
            self._log_file = filename
        try:
            with open(self._log_file, "r") as f:
                self.execution_logs = json.load(f)
        except FileNotFoundError:
            self._logger.log(
                "warning",
                f"Log file {self._log_file} not found. Starting with empty logs.",
            )
            self.execution_logs = []
        except json.JSONDecodeError:
            self._logger.log(
                "error",
                f"Failed to parse log file {self._log_file}. Starting with empty logs.",
            )
            self.execution_logs = []
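For reference, the log entries these helpers round-trip are plain dicts. A sketch of the shape implied by the lookups in replay_from_task above (the keys come from the diff; the values are invented):

    example_log_entry = {
        "task_id": "6f1a...",              # compared against str(task.id)
        "inputs": {"topic": "dogs"},       # re-interpolated on replay
        "output": {
            "description": "Collect sources",
            "raw_output": "1. ...",
            "pydantic_output": None,
            "json_output": None,
            "agent": "Researcher",
        },
    }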
    def _run_hierarchical_process(self) -> Tuple[CrewOutput, Dict[str, Any]]:
        """Creates and assigns a manager agent to make sure the crew completes the tasks."""

        i18n = I18N(prompt_file=self.prompt_file)
        if self.manager_agent is not None:
            self.manager_agent.allow_delegation = True
@@ -485,7 +731,8 @@ class Crew(BaseModel):
            )
            self.manager_agent = manager

        task_output = ""
        task_outputs: List[TaskOutput] = []
        futures: List[Tuple[Task, Future[TaskOutput]]] = []

        for task in self.tasks:
            self._logger.log("debug", f"Working Agent: {manager.role}")
@@ -496,26 +743,50 @@ class Crew(BaseModel):
                    agent=manager.role, task=task.description, status="started"
                )

            if task.agent:
                manager.tools = task.agent.get_delegation_tools([task.agent])
            else:
                manager.tools = manager.get_delegation_tools(self.agents)
            task_output = task.execute(
                agent=manager, context=task_output, tools=manager.tools
            )

            self._logger.log("debug", f"[{manager.role}] Task output: {task_output}")
            if self.output_log_file:
                self._file_handler.log(
                    agent=manager.role, task=task_output, status="completed"
            if task.async_execution:
                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
                future = task.execute_async(
                    agent=manager, context=context, tools=manager.tools
                )
                futures.append((task, future))
            else:
                # Before executing a synchronous task, wait for all async tasks to complete
                if futures:
                    # Clear task_outputs before processing async tasks
                    task_outputs = []
                    for future_task, future in futures:
                        task_output = future.result()
                        task_outputs.append(task_output)
                        self._process_task_result(future_task, task_output)

                        self._finish_execution(task_output)
                    # Clear the futures list after processing all async results
                    futures.clear()

                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
                task_output = task.execute_sync(
                    agent=manager, context=context, tools=manager.tools
                )
                task_outputs = [task_output]
                self._process_task_result(task, task_output)

        # Process any remaining async results
        if futures:
            # Clear task_outputs before processing async tasks
            task_outputs = []
            for future_task, future in futures:
                task_output = future.result()
                task_outputs.append(task_output)
                self._process_task_result(future_task, task_output)

        final_string_output = aggregate_raw_outputs_from_task_outputs(task_outputs)
        self._finish_execution(final_string_output)

        # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
        token_usage = self.calculate_usage_metrics()

        return self._format_output(task_output, token_usage), token_usage
        return (
            self._format_output(task_outputs, token_usage),
            token_usage,
        )
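The loop above follows one rule: asynchronous tasks accumulate as futures, and any synchronous task first drains them so its context can include their results. A stripped-down, crewAI-independent sketch of that pattern:

    from concurrent.futures import Future, ThreadPoolExecutor

    def run_interleaved(steps):
        """steps: list of (callable, is_async) pairs, in execution order."""
        pool = ThreadPoolExecutor()
        pending: list[Future] = []
        results = []
        for fn, is_async in steps:
            if is_async:
                pending.append(pool.submit(fn))
            else:
                # Drain async work first so the sync step can see its results.
                results.extend(f.result() for f in pending)
                pending.clear()
                results.append(fn())
        results.extend(f.result() for f in pending)  # drain any stragglers
        return results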

    def copy(self):
        """Create a deep copy of the Crew."""
@@ -568,30 +839,26 @@ class Crew(BaseModel):
            agent.interpolate_inputs(inputs)

    def _format_output(
        self, output: str, token_usage: Optional[Dict[str, Any]] = None
    ) -> Union[str, Dict[str, Any]]:
        self, output: List[TaskOutput], token_usage: Optional[Dict[str, Any]]
    ) -> CrewOutput:
        """
        Formats the output of the crew execution.
        If full_output is True, then returned data type will be a dictionary else returned outputs are string
        """
        return CrewOutput(
            output=output,
            tasks_output=[task.output for task in self.tasks if task and task.output],
            token_usage=token_usage,
        )

        if self.full_output:
            return {  # type: ignore # Incompatible return value type (got "dict[str, Sequence[str | TaskOutput | None]]", expected "str")
                "final_output": output,
                "tasks_outputs": [task.output for task in self.tasks if task],
                "usage_metrics": token_usage,
            }
        else:
            return output

    def _finish_execution(self, output) -> None:
    def _finish_execution(self, final_string_output: str) -> None:
        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()
        if agentops:
            agentops.end_session(
                end_state="Success", end_state_reason="Finished Execution"
                end_state="Success",
                end_state_reason="Finished Execution",
            )
        self._telemetry.end_crew(self, output)
        self._telemetry.end_crew(self, final_string_output)

    def calculate_usage_metrics(self) -> Dict[str, int]:
        """Calculates and returns the usage metrics."""

src/crewai/crews/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .crew_output import CrewOutput

src/crewai/crews/crew_output.py (new file, 44 lines)
@@ -0,0 +1,44 @@
from typing import Any, Dict, List

from pydantic import BaseModel, Field

from crewai.tasks.task_output import TaskOutput
from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs


class CrewOutput(BaseModel):
    output: List[TaskOutput] = Field(description="Result of the final task")
    tasks_output: list[TaskOutput] = Field(
        description="Output of each task", default=[]
    )
    token_usage: Dict[str, Any] = Field(
        description="Processed token summary", default={}
    )

    # TODO: Ask @joao what is the desired behavior here
    def result(
        self,
    ) -> List[str | BaseModel | Dict[str, Any]]:
        """Return the result of the task based on the available output."""
        results = [output.result() for output in self.output]
        return results

    def raw_output(self) -> str:
        """Return the raw output of the task."""
        return aggregate_raw_outputs_from_task_outputs(self.output)

    def to_output_dict(self) -> List[Dict[str, Any]]:
        output_dict = [output.to_output_dict() for output in self.output]
        return output_dict

    def __getitem__(self, key: str) -> Any:
        if len(self.output) == 0:
            return None
        elif len(self.output) == 1:
            return self.output[0][key]
        else:
            return [output[key] for output in self.output]

    # TODO: Confirm with Joao that we want to print the raw output and not the object
    def __str__(self):
        return str(self.raw_output())
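Assuming the class above, consumer code would interact with a CrewOutput roughly like this (the crew and the key name are hypothetical):

    output = crew.kickoff()         # returns a CrewOutput on this branch

    print(output)                   # __str__ delegates to raw_output()
    text = output.raw_output()      # all raw task outputs joined with dividers
    per_task = output.result()      # list: pydantic model, dict, or raw string per task
    rows = output.to_output_dict()  # list of dicts, one per task output
    title = output["title"]         # __getitem__ fans out across task outputs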

@@ -2,6 +2,7 @@ import os
import re
import threading
import uuid
from concurrent.futures import Future
from copy import copy
from typing import Any, Dict, List, Optional, Type, Union

@@ -13,7 +14,8 @@ from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.utilities.converter import ConverterError
from crewai.utilities.converter import Converter, ConverterError
from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs
from crewai.utilities.i18n import I18N
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -161,78 +163,89 @@ class Task(BaseModel):
        """Wait for asynchronous task completion and return the output."""
        assert self.async_execution, "Task is not set to be executed asynchronously."

        if self._thread:
            self._thread.join()
            self._thread = None
        if self._future:
            self._future.result()  # Wait for the future to complete
            self._future = None

        assert self.output, "Task output is not set."

        return self.output.exported_output

    def execute(  # type: ignore # Missing return statement
    def execute_sync(
        self,
        agent: Optional[BaseAgent] = None,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> TaskOutput:
        """Execute the task synchronously."""
        return self._execute_core(agent, context, tools)

    def execute_async(
        self,
        agent: BaseAgent | None = None,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute the task.
    ) -> Future[TaskOutput]:
        """Execute the task asynchronously."""
        future = Future()
        threading.Thread(
            target=self._execute_task_async, args=(agent, context, tools, future)
        ).start()
        return future

        Returns:
            Output of the task.
        """

        self._execution_span = self._telemetry.task_started(self)
    def _execute_task_async(
        self,
        agent: Optional[BaseAgent],
        context: Optional[str],
        tools: Optional[List[Any]],
        future: Future[TaskOutput],
    ) -> None:
        """Execute the task asynchronously with context handling."""
        result = self._execute_core(agent, context, tools)
        future.set_result(result)
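execute_async pairs a concurrent.futures.Future with a plain thread rather than an executor. A self-contained sketch of the same pattern; the except branch is a defensive addition of mine, not something the diff includes:

    import threading
    from concurrent.futures import Future

    def run_async(fn, *args) -> Future:
        future: Future = Future()

        def worker():
            try:
                future.set_result(fn(*args))
            except Exception as exc:        # not in the diff: without this,
                future.set_exception(exc)   # a crash would hang future.result()

        threading.Thread(target=worker).start()
        return future

    f = run_async(sum, [1, 2, 3])
    print(f.result())  # blocks until the worker finishes -> 6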

    def _execute_core(
        self,
        agent: Optional[BaseAgent],
        context: Optional[str],
        tools: Optional[List[Any]],
    ) -> TaskOutput:
        """Run the core execution logic of the task."""
        agent = agent or self.agent
        if not agent:
            raise Exception(
                f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
            )

        self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)

        if self.context:
            # type: ignore # Incompatible types in assignment (expression has type "list[Never]", variable has type "str | None")
            context = []
            task_outputs: List[TaskOutput] = []
            for task in self.context:
                if task.async_execution:
                    task.wait_for_completion()
                # if task.async_execution:
                #     task.wait_for_completion()
                if task.output:
                    # type: ignore # Item "str" of "str | None" has no attribute "append"
                    context.append(task.output.raw_output)
            # type: ignore # Argument 1 to "join" of "str" has incompatible type "str | None"; expected "Iterable[str]"
            context = "\n".join(context)
                    task_outputs.append(task.output)
            context = aggregate_raw_outputs_from_task_outputs(task_outputs)

        self.prompt_context = context
        tools = tools or self.tools

        if self.async_execution:
            self._thread = threading.Thread(
                target=self._execute, args=(agent, self, context, tools)
            )
            self._thread.start()
        else:
            result = self._execute(
                task=self,
                agent=agent,
                context=context,
                tools=tools,
            )
            return result

    def _execute(self, agent: "BaseAgent", task, context, tools):
        result = agent.execute_task(
            task=task,
            task=self,
            context=context,
            tools=tools,
        )
        exported_output = self._export_output(result)

        # type: ignore # the responses are usually str but need to figure out a more elegant solution here
        self.output = TaskOutput(
        task_output = TaskOutput(
            description=self.description,
            exported_output=exported_output,
            raw_output=result,
            pydantic_output=exported_output["pydantic"],
            json_output=exported_output["json"],
            agent=agent.role,
        )
        self.output = task_output

        if self.callback:
            self.callback(self.output)
@@ -241,7 +254,7 @@ class Task(BaseModel):
            self._telemetry.task_ended(self._execution_span, self)
            self._execution_span = None

        return exported_output
        return task_output

    def prompt(self) -> str:
        """Prompt the task.
@@ -289,7 +302,7 @@ class Task(BaseModel):
        copied_data = {k: v for k, v in copied_data.items() if v is not None}

        cloned_context = (
            [task.copy() for task in self.context] if self.context else None
            [task.copy(agents) for task in self.context] if self.context else None
        )

        def get_agent_by_role(role: str) -> Union["BaseAgent", None]:
@@ -307,81 +320,102 @@ class Task(BaseModel):

        return copied_task

    def _export_output(self, result: str) -> Any:
        exported_result = result
        instructions = "I'm gonna convert this raw text into valid JSON."
    def _export_output(
        self, result: str
    ) -> Dict[str, Union[BaseModel, Dict[str, Any]]]:
        output = {
            "pydantic": None,
            "json": None,
        }

        if self.output_pydantic or self.output_json:
            model = self.output_pydantic or self.output_json
            model_output = self._convert_to_model(result)
            output["pydantic"] = (
                model_output if isinstance(model_output, BaseModel) else None
            )
            output["json"] = model_output if isinstance(model_output, dict) else None

        # try to convert task_output directly to pydantic/json
        if self.output_file:
            self._save_output(output["raw"])

        return output

    def _convert_to_model(self, result: str) -> Union[dict, BaseModel, str]:
        model = self.output_pydantic or self.output_json
        try:
            return self._validate_model(result, model)
        except Exception:
            return self._handle_partial_json(result, model)

    def _validate_model(
        self, result: str, model: Type[BaseModel]
    ) -> Union[dict, BaseModel]:
        exported_result = model.model_validate_json(result)
        if self.output_json:
            return exported_result.model_dump()
        return exported_result
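_validate_model leans on pydantic v2's model_validate_json. A standalone example of the validate-then-dump behavior it implements; the Report model is illustrative only:

    from pydantic import BaseModel

    class Report(BaseModel):
        title: str
        score: int

    raw = '{"title": "Dogs", "score": 9}'
    report = Report.model_validate_json(raw)  # raises on malformed JSON or fields
    print(report.model_dump())                # {'title': 'Dogs', 'score': 9}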

    def _handle_partial_json(
        self, result: str, model: Type[BaseModel]
    ) -> Union[dict, BaseModel, str]:
        match = re.search(r"({.*})", result, re.DOTALL)
        if match:
            try:
                # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
                exported_result = model.model_validate_json(result)
                exported_result = model.model_validate_json(match.group(0))
                if self.output_json:
                    # type: ignore # "str" has no attribute "model_dump"
                    return exported_result.model_dump()
                return exported_result
            except Exception:
                # sometimes the response contains valid JSON in the middle of text
                match = re.search(r"({.*})", result, re.DOTALL)
                if match:
                    try:
                        # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
                        exported_result = model.model_validate_json(match.group(0))
                        if self.output_json:
                            # type: ignore # "str" has no attribute "model_dump"
                            return exported_result.model_dump()
                        return exported_result
                    except Exception:
                        pass
                pass

        # type: ignore # Item "None" of "BaseAgent | None" has no attribute "function_calling_llm"
        llm = getattr(self.agent, "function_calling_llm", None) or self.agent.llm
        if not self._is_gpt(llm):
            # type: ignore # Argument "model" to "PydanticSchemaParser" has incompatible type "type[BaseModel] | None"; expected "type[BaseModel]"
            model_schema = PydanticSchemaParser(model=model).get_schema()
            instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
        return self._convert_with_instructions(result, model)

        converter = self.agent.get_output_converter(
            llm=llm, text=result, model=model, instructions=instructions
    def _convert_with_instructions(
        self, result: str, model: Type[BaseModel]
    ) -> Union[dict, BaseModel, str]:
        llm = self.agent.function_calling_llm or self.agent.llm
        instructions = self._get_conversion_instructions(model, llm)

        converter = Converter(
            llm=llm, text=result, model=model, instructions=instructions
        )
        exported_result = (
            converter.to_pydantic() if self.output_pydantic else converter.to_json()
        )

        if isinstance(exported_result, ConverterError):
            Printer().print(
                content=f"{exported_result.message} Using raw output instead.",
                color="red",
            )

        if self.output_pydantic:
            exported_result = converter.to_pydantic()
        elif self.output_json:
            exported_result = converter.to_json()

        if isinstance(exported_result, ConverterError):
            Printer().print(
                content=f"{exported_result.message} Using raw output instead.",
                color="red",
            )
            exported_result = result

        if self.output_file:
            content = (
                # type: ignore # "str" has no attribute "json"
                exported_result
                if not self.output_pydantic
                else exported_result.model_dump_json()
            )
            self._save_file(content)
            return result

        return exported_result

    def _get_conversion_instructions(self, model: Type[BaseModel], llm: Any) -> str:
        instructions = "I'm gonna convert this raw text into valid JSON."
        if not self._is_gpt(llm):
            model_schema = PydanticSchemaParser(model=model).get_schema()
            instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
        return instructions

    def _save_output(self, content: str) -> None:
        directory = os.path.dirname(self.output_file)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
        with open(self.output_file, "w", encoding="utf-8") as file:
            file.write(content)

    def _is_gpt(self, llm) -> bool:
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

    def _save_file(self, result: Any) -> None:
        # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"
        directory = os.path.dirname(self.output_file)
        directory = os.path.dirname(self.output_file)  # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"

        if directory and not os.path.exists(directory):
            os.makedirs(directory)

        # type: ignore # Argument 1 to "open" has incompatible type "str | None"; expected "int | str | bytes | PathLike[str] | PathLike[bytes]"
        with open(self.output_file, "w", encoding="utf-8") as file:
        with open(self.output_file, "w", encoding="utf-8") as file:  # type: ignore # Argument 1 to "open" has incompatible type "str | None"; expected "int | str | bytes | PathLike[str] | PathLike[bytes]"
            file.write(result)
        return None

@@ -1,24 +1,56 @@
from typing import Optional, Union
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel, Field, model_validator


# TODO: This is a breaking change. Confirm with @joao
class TaskOutput(BaseModel):
    """Class that represents the result of a task."""

    description: str = Field(description="Description of the task")
    summary: Optional[str] = Field(description="Summary of the task", default=None)
    exported_output: Union[str, BaseModel] = Field(
        description="Output of the task", default=None
    raw_output: str = Field(description="Result of the task")
    pydantic_output: Optional[BaseModel] = Field(
        description="Pydantic model output", default=None
    )
    json_output: Optional[Dict[str, Any]] = Field(
        description="JSON output", default=None
    )
    agent: str = Field(description="Agent that executed the task")
    raw_output: str = Field(description="Result of the task")

    @model_validator(mode="after")
    def set_summary(self):
        """Set the summary field based on the description."""
        excerpt = " ".join(self.description.split(" ")[:10])
        self.summary = f"{excerpt}..."
        return self

    def result(self):
        return self.exported_output
    # TODO: Ask @joao what is the desired behavior here
    def result(self) -> Union[str, BaseModel, Dict[str, Any]]:
        """Return the result of the task based on the available output."""
        if self.pydantic_output:
            return self.pydantic_output
        elif self.json_output:
            return self.json_output
        else:
            return self.raw_output

    def __getitem__(self, key: str) -> Any:
        """Retrieve a value from the pydantic_output or json_output based on the key."""
        if self.pydantic_output and hasattr(self.pydantic_output, key):
            return getattr(self.pydantic_output, key)
        if self.json_output and key in self.json_output:
            return self.json_output[key]
        raise KeyError(f"Key '{key}' not found in pydantic_output or json_output")

    def to_output_dict(self) -> Dict[str, Any]:
        """Convert json_output and pydantic_output to a dictionary."""
        output_dict = {}
        if self.json_output:
            output_dict.update(self.json_output)
        if self.pydantic_output:
            output_dict.update(self.pydantic_output.model_dump())
        return output_dict

    def __str__(self) -> str:
        return self.raw_output
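A quick illustration of the precedence result() encodes (pydantic over json over raw) and of the new __getitem__; the Pet model is made up:

    from pydantic import BaseModel
    from crewai.tasks.task_output import TaskOutput

    class Pet(BaseModel):
        name: str

    out = TaskOutput(
        description="Name a pet",
        raw_output='{"name": "Rex"}',
        pydantic_output=Pet(name="Rex"),
        agent="dog Researcher",
    )
    assert out.result() == Pet(name="Rex")  # pydantic wins over json and raw
    assert out["name"] == "Rex"             # read through the pydantic model
    assert str(out) == '{"name": "Rex"}'    # __str__ is always the raw text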

@@ -156,18 +156,35 @@ class Telemetry:
            except Exception:
                pass

    def task_started(self, task: Task) -> Span | None:
    def task_started(self, crew: Crew, task: Task) -> Span | None:
        """Records task started in a crew."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Task Execution")

                created_span = tracer.start_span("Task Created")

                self._add_attribute(created_span, "task_id", str(task.id))

                if crew.share_crew:
                    self._add_attribute(
                        created_span, "formatted_description", task.description
                    )
                    self._add_attribute(
                        created_span, "formatted_expected_output", task.expected_output
                    )

                created_span.set_status(Status(StatusCode.OK))
                created_span.end()

                self._add_attribute(span, "task_id", str(task.id))
                self._add_attribute(span, "formatted_description", task.description)
                self._add_attribute(
                    span, "formatted_expected_output", task.expected_output
                )

                if crew.share_crew:
                    self._add_attribute(span, "formatted_description", task.description)
                    self._add_attribute(
                        span, "formatted_expected_output", task.expected_output
                    )

                return span
            except Exception:
@@ -320,7 +337,7 @@ class Telemetry:
            except Exception:
                pass

    def end_crew(self, crew, output):
    def end_crew(self, crew, final_string_output):
        if (self.ready) and (crew.share_crew):
            try:
                self._add_attribute(
@@ -328,7 +345,9 @@ class Telemetry:
                    "crewai_version",
                    pkg_resources.get_distribution("crewai").version,
                )
                self._add_attribute(crew._execution_span, "crew_output", output)
                self._add_attribute(
                    crew._execution_span, "crew_output", final_string_output
                )
                self._add_attribute(
                    crew._execution_span,
                    "crew_tasks_output",

@@ -8,7 +8,7 @@ from pydantic.v1 import BaseModel, Field
class ToolCalling(BaseModel):
    tool_name: str = Field(..., description="The name of the tool to be called.")
    arguments: Optional[Dict[str, Any]] = Field(
        ..., description="A dictinary of arguments to be passed to the tool."
        ..., description="A dictionary of arguments to be passed to the tool."
    )


@@ -17,5 +17,5 @@ class InstructorToolCalling(PydanticBaseModel):
        ..., description="The name of the tool to be called."
    )
    arguments: Optional[Dict[str, Any]] = PydanticField(
        ..., description="A dictinary of arguments to be passed to the tool."
        ..., description="A dictionary of arguments to be passed to the tool."
    )

@@ -11,11 +11,10 @@ from crewai.telemetry import Telemetry
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.utilities import I18N, Converter, ConverterError, Printer

agentops = None
try:
    import agentops
except ImportError:
    pass
    agentops = None

OPENAI_BIGGER_MODELS = ["gpt-4"]

@@ -120,7 +119,7 @@ class ToolUsage:
                    attempts=self._run_attempts,
                )
            result = self._format_result(result=result)  # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None)
            return result  # type: ignore # Fix the reutrn type of this function
            return result  # type: ignore # Fix the return type of this function

        except Exception:
            self.task.increment_tools_errors()
@@ -216,7 +215,7 @@ class ToolUsage:
                hasattr(original_tool, "result_as_answer")
                and original_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
            ):
                result_as_answer = original_tool.result_as_answer
                result_as_answer = original_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
                data["result_as_answer"] = result_as_answer

        self.agent.tools_results.append(data)

@@ -16,7 +16,7 @@
    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
    "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
    "human_feedback": "You got human feedback on your work, re-avaluate it and give a new Final Answer when ready.\n {human_feedback}",
    "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
    "getting_input": "This is the agent's final answer: {final_answer}\nPlease provide feedback: "
  },
  "errors": {

src/crewai/utilities/crew_json_encoder.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from datetime import datetime
import json
from uuid import UUID
from pydantic import BaseModel


class CrewJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        if isinstance(obj, UUID):
            return str(obj)
        if isinstance(obj, BaseModel):
            return obj.model_dump()
        if hasattr(obj, "__dict__"):
            return obj.__dict__
        return str(obj)
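CrewJSONEncoder plugs into the stdlib default() hook, so any json.dump call that passes cls=CrewJSONEncoder serializes UUIDs, datetimes, and pydantic models cleanly. For example:

    import json
    from datetime import datetime
    from uuid import uuid4

    from crewai.utilities.crew_json_encoder import CrewJSONEncoder

    payload = {"task_id": uuid4(), "finished_at": datetime.now()}
    print(json.dumps(payload, cls=CrewJSONEncoder, indent=2))
    # Both values come out as strings; a pydantic model would use model_dump().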

@@ -1,6 +1,9 @@
import os
import pickle
from datetime import datetime
import json

from crewai.utilities.crew_json_encoder import CrewJSONEncoder


class FileHandler:
@@ -66,3 +69,37 @@ class PickleHandler:
            return {}  # Return an empty dictionary if the file is empty or corrupted
        except Exception:
            raise  # Raise any other exceptions that occur during loading


class TaskOutputJsonHandler:
    def __init__(self, file_name: str) -> None:
        self.file_path = os.path.join(os.getcwd(), file_name)

    def initialize_file(self) -> None:
        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
            with open(self.file_path, "w") as file:
                json.dump([], file)

    def append(self, log) -> None:
        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
            # Initialize the file with an empty list if it doesn't exist or is empty
            with open(self.file_path, "w") as file:
                json.dump([], file)
        with open(self.file_path, "r+") as file:
            try:
                file_data = json.load(file)
            except json.JSONDecodeError:
                # If the file contains invalid JSON, initialize it with an empty list
                file_data = []

            file_data.append(log)
            file.seek(0)
            json.dump(file_data, file, indent=2, cls=CrewJSONEncoder)
            file.truncate()

    def load(self) -> list:
        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
            return []

        with open(self.file_path, "r") as file:
            return json.load(file)
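Usage of the new handler is append-oriented; a short sketch (the module path is my assumption from the surrounding hunk, and the entry is invented):

    from crewai.utilities.file_handler import TaskOutputJsonHandler

    handler = TaskOutputJsonHandler("task_outputs.json")
    handler.initialize_file()   # ensures the file holds a valid JSON list
    handler.append({"task_id": "6f1a...", "output": {"raw_output": "done"}})
    print(handler.load())       # -> list of logged entries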

src/crewai/utilities/formatter.py (new file, 12 lines)
@@ -0,0 +1,12 @@
from typing import List

from crewai.tasks.task_output import TaskOutput


def aggregate_raw_outputs_from_task_outputs(task_outputs: List[TaskOutput]) -> str:
    """Generate string context from the task outputs."""
    dividers = "\n\n----------\n\n"

    # Join task outputs with dividers
    context = dividers.join(output.raw_output for output in task_outputs)
    return context

@@ -8,6 +8,8 @@ class Printer:
            self._print_bold_green(content)
        elif color == "bold_purple":
            self._print_bold_purple(content)
        elif color == "bold_blue":
            self._print_bold_blue(content)
        else:
            print(content)

@@ -22,3 +24,6 @@ class Printer:

    def _print_red(self, content):
        print("\033[91m {}\033[00m".format(content))

    def _print_bold_blue(self, content):
        print("\033[1m\033[94m {}\033[00m".format(content))

@@ -4,10 +4,6 @@ from unittest import mock
from unittest.mock import patch

import pytest
from langchain.tools import tool
from langchain_core.exceptions import OutputParserException
from langchain_openai import ChatOpenAI

from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.executor import CrewAgentExecutor
@@ -15,6 +11,9 @@ from crewai.agents.parser import CrewAgentParser
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController
from langchain.tools import tool
from langchain_core.exceptions import OutputParserException
from langchain_openai import ChatOpenAI


def test_agent_creation():
@@ -631,8 +630,8 @@ def test_agent_use_specific_tasks_output_as_context(capsys):

    crew = Crew(agents=[agent1, agent2], tasks=tasks)
    result = crew.kickoff()
    assert "bye" not in result.lower()
    assert "hi" in result.lower() or "hello" in result.lower()
    assert "bye" not in result.raw_output().lower()
    assert "hi" in result.raw_output().lower() or "hello" in result.raw_output().lower()


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -750,12 +749,11 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
    crew = Crew(agents=[agent1], tasks=tasks)

    result = crew.kickoff()
    assert result == "Howdy!"


pytest.mark.vcr(filter_headers=["authorization"])
    print("RESULT: ", result.raw_output())
    assert result.raw_output() == "Howdy!"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_usage_information_is_appended_to_agent():
    from crewai_tools import BaseTool

tests/cassettes/test_async_execution_single_task.yaml (new file, 1804 lines)
File diff suppressed because it is too large

tests/cassettes/test_async_task_execution.yaml (new file, 4694 lines)
File diff suppressed because it is too large

tests/cassettes/test_crew_async_kickoff.yaml (new file, 591 lines)
@@ -0,0 +1,591 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are dog Researcher. You have a lot of experience
      with dog.\nYour personal goal is: Express hot takes on dog.To give my best complete
      final answer to the task use the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
      final answer must be the great and the most complete as possible, it must be
      outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
      Task: Give me an analysis around dog.\n\nThis is the expect criteria for your
      final answer: 1 bullet point about dog that''s under 15 words. \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '951'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        Dogs"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        are"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        incredibly"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        loyal"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        and"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        provide"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        unmatched"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        companionship"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        to"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        humans"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89d0fa4e7abf53db-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Tue, 02 Jul 2024 19:17:45 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=6Xl2nvdsXT4uSfQ3C1ZK.LWKGYekVs5ErrLDZOdI.50-1719947865-1.0.1.1-6RQoTCznxe7H868MoxghRegIZaElbG_bN_jbs94hmnsnuR1P9bptoj8o2DbOSvj48ubewyvy8L16mOZHlMLw_A;
        path=/; expires=Tue, 02-Jul-24 19:47:45 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=kPTMOkGHQp0ytgVUrm3jFNiB9I.DDI2ONPRTr6IMTeo-1719947865623-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '102'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9997'
      x-ratelimit-remaining-tokens:
      - '15999783'
      x-ratelimit-reset-requests:
      - 14ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_2c5219e228ce79f0131c497230904013
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"content": "You are apple Researcher. You have a lot of
      experience with apple.\nYour personal goal is: Express hot takes on apple.To
      give my best complete final answer to the task use the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: my best complete final answer to
      the task.\nYour final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!\nCurrent Task: Give me an analysis around apple.\n\nThis is the expect criteria
      for your final answer: 1 bullet point about apple that''s under 15 words. \n
      you MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '961'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        Apple"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        revolution"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"izes"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        technology"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        with"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        sleek"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        designs"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        seamless"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        integration"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        and"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        innovative"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        user"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        experiences"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89d0fa4e7ca907e6-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Tue, 02 Jul 2024 19:17:45 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=wf2ozMjr46sG0EhuZjpiDNagwTxC05ct3Hn7Y9Rs5AI-1719947865-1.0.1.1-uckxTTr7Yfe6sv4ZznqqrGTEz9E3_Cpp7OAWBIEeNz1Smdjwijw8YV5oYPe_6W4DrEtwVzRDxaqIHlWP55O0QA;
        path=/; expires=Tue, 02-Jul-24 19:47:45 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=F9pWw4TeoPa8puOm5RN9Gp2oY0lRoN53ChZ1qFYx1S8-1719947865726-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '168'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9998'
      x-ratelimit-remaining-tokens:
      - '15999780'
      x-ratelimit-reset-requests:
      - 10ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_e6dfeda5935eae030bcc2da526234635
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"content": "You are cat Researcher. You have a lot of experience
      with cat.\nYour personal goal is: Express hot takes on cat.To give my best complete
      final answer to the task use the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
      final answer must be the great and the most complete as possible, it must be
      outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
      Task: Give me an analysis around cat.\n\nThis is the expect criteria for your
      final answer: 1 bullet point about cat that''s under 15 words. \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '951'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: 'data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
I"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
now"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
can"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
give"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
a"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
great"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
answer"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
Answer"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
Cats"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
are"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
master"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"ful"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
hunters"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
and"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
brilliant"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
problem"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"-sol"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"vers"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 89d0fa4e7ae912d7-ATL
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 02 Jul 2024 19:17:45 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=y7JNZ8WEp.q5pMXLi79ajfcI.F6MfE0GeYLw34Apkf0-1719947865-1.0.1.1-QKklGeYuOnsQROgqMs42XwqKNvW.mPrmcbtaxMnUg3eSgI7TRnRq4qPuSan0ynDt4Hd9NMuls2FR.Caa1MVr9Q;
|
||||
path=/; expires=Tue, 02-Jul-24 19:47:45 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=FVQoSgcvVyiB_o43X6y5MGYgzGojmsQqS.nPObW3JYU-1719947865679-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '132'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '16000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '15999783'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_a06bde4044d3ee75edf08f333139679c
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
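An editorial note on reading these fixtures: each streamed body above is a server-sent-events transcript in which every data: line carries one chat.completion.chunk, and the final text is simply the concatenation of the delta.content fragments up to the chunk whose finish_reason is stop, followed by the data: [DONE] sentinel. A minimal sketch of that accumulation in plain Python (not crewAI's client code, just the decoding rule the cassettes embody):

import json

def accumulate_sse(raw: str) -> str:
    """Concatenate the delta.content fragments of a streamed chat completion."""
    parts = []
    for line in raw.splitlines():
        line = line.strip()
        if not line.startswith("data:"):
            continue  # skip the blank separator lines between events
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break  # end-of-stream sentinel
        chunk = json.loads(payload)
        delta = chunk["choices"][0]["delta"]
        parts.append(delta.get("content") or "")  # role-only and stop chunks add nothing
    return "".join(parts)

Run over the first stream above, this yields the sentence ending "...technology with sleek designs, seamless integration, and innovative user experiences."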
@@ -1,314 +0,0 @@
interactions:
- request:
body: '{"messages": [{"content": "You are test role. test backstory\nYour personal goal is: test goalTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent Task: just say hi!\n\nThis is the expect criteria for your final answer: your greeting \n you MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '853'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.34.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.34.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" Hi"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 89c8c71ccad81823-ATL
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 01 Jul 2024 19:24:42 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=uU.2MR0L4Mv3xs4DzFlWOQLVId1dJXQBlWffhr9mqxU-1719861882-1.0.1.1-JSKN2_O9iYj8QCZjy0IGiunZxvXimz5Kzv5wQJedVua5E6WIl1UvP.wguXbK0cds7ayJReYnR8v8oAN2rmtnNQ; path=/; expires=Mon, 01-Jul-24 19:54:42 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- _cfuvid=yc5Q7WKbO5zoiGNQx86HpHNM3HeXi2HxCxw31lL_UuU-1719861882665-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '86'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '16000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '15999808'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_25d95f35048bf71e28d73fbed6576a6c
status:
code: 200
message: OK
- request:
body: '{"messages": [{"content": "You are test role. test backstory\nYour personal goal is: test goalTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent Task: just say hello!\n\nThis is the expect criteria for your final answer: your greeting \n you MUST return the actual complete content as the final answer, not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '905'
content-type:
- application/json
cookie:
- __cf_bm=uU.2MR0L4Mv3xs4DzFlWOQLVId1dJXQBlWffhr9mqxU-1719861882-1.0.1.1-JSKN2_O9iYj8QCZjy0IGiunZxvXimz5Kzv5wQJedVua5E6WIl1UvP.wguXbK0cds7ayJReYnR8v8oAN2rmtnNQ; _cfuvid=yc5Q7WKbO5zoiGNQx86HpHNM3HeXi2HxCxw31lL_UuU-1719861882665-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.34.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.34.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" Hello"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 89c8c7202e0f1823-ATL
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 01 Jul 2024 19:24:43 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '82'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '16000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '15999794'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_754b5067e8f56d5c1182dc0f57be0e45
status:
code: 200
message: OK
version: 1
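The hunk above deletes a recorded cassette outright. Cassettes of this shape are replayed fixtures: the first test run captures the real request/response pair, and subsequent runs are served from the YAML with no network traffic, which is how a CI job can exercise OpenAI-backed code paths offline. Assuming the suite wires this up with pytest-recording/vcrpy (the conventional reading of a tests/cassettes/ directory), a consuming test looks roughly like this; the agent fields mirror the recorded prompt, but the exact crewAI call signatures shown are illustrative:

import pytest
from crewai import Agent, Task

@pytest.mark.vcr(filter_headers=["authorization"])  # replays tests/cassettes/<test_name>.yaml
def test_agent_says_hi():
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")
    task = Task(description="just say hi!", expected_output="your greeting", agent=agent)
    result = task.execute()  # served from the cassette, not the live API
    assert "Hi" in result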
258 tests/cassettes/test_custom_converter_cls.yaml Normal file
@@ -0,0 +1,258 @@
interactions:
- request:
body: '{"messages": [{"content": "You are Scorer. You''re an expert scorer, specialized in scoring titles.\nYour personal goal is: Score the titleTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent Task: Give me an integer score between 1-5 for the following title: ''The impact of AI in the future of work''\n\nThis is the expect criteria for your final answer: The score of the title. \n you MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '997'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.35.10
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.35.10
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hrsMKHuOkxqftWK9DtuC10VCJ17t","object":"chat.completion.chunk","created":1720242230,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 89ed0cf0dc05741a-MIA
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Sat, 06 Jul 2024 05:03:50 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=JI76H4xxreAnMx1JJoPragplAdYdjbDNA68Hr3Cs_0k-1720242230-1.0.1.1-oHSrtm.ejkvCiAHC11lg0MnvmopYZayTZRq09IcH2yh5BA6FyyufGH7Rm59BAz.gdZHc0izmjElXfLiu2bZ_jQ; path=/; expires=Sat, 06-Jul-24 05:33:50 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- _cfuvid=X4.n0cNP9j1jseIPV4H1aDJu2xrsAwcUI8rY0tbLc40-1720242230210-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '71'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '16000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '15999772'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8dc1d49d85fcf8e39601e32ca80abd6b
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "4"}, {"role": "system", "content": "I''m gonna convert this raw text into valid JSON."}], "model": "gpt-4o", "tool_choice": {"type": "function", "function": {"name": "ScoreOutput"}}, "tools": [{"type": "function", "function": {"name": "ScoreOutput", "description": "Correctly extracted `ScoreOutput` with all the required parameters with correct types", "parameters": {"properties": {"score": {"title": "Score", "type": "integer"}}, "required": ["score"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '519'
content-type:
- application/json
cookie:
- __cf_bm=JI76H4xxreAnMx1JJoPragplAdYdjbDNA68Hr3Cs_0k-1720242230-1.0.1.1-oHSrtm.ejkvCiAHC11lg0MnvmopYZayTZRq09IcH2yh5BA6FyyufGH7Rm59BAz.gdZHc0izmjElXfLiu2bZ_jQ; _cfuvid=X4.n0cNP9j1jseIPV4H1aDJu2xrsAwcUI8rY0tbLc40-1720242230210-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.35.10
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.35.10
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA2xSS2/bMAy++1cIPNeF81pT34YBG9A13aHAgL5gKArtKJVFTaKBtkH++yDFi91g
PggEP34PkN5nQoDeQClAbSWr1pn8euvD6id9OP3V3C6bzZ2qd7eT1f3DykgFF5FB6x0q/se6VNQ6
g6zJHmHlUTJG1cnVtJjOp9NZkYCWNmgirXGczymPYF4s8smsJ25JKwxQiqdMCCH26Y0R7QbfoBRJ
JnVaDEE2COVpSAjwZGIHZAg6sLQMFwOoyDLamNp2xowAJjKVksYMxsdvP6qHPUljqsI2duX13ePN
w2/7HT/+8Lfd45cfYeR3lH53KVDdWXXazwg/9cszMyHAyjZx7xV5/NWx6/iMLgRI33QtWo7RYf8M
IQ4/Qzk/wKfRQ/a/+qWvDqe1Gmqcp3U42xLU2uqwrTzKkNJCYHJHiyj3ks7XfboIOE+t44rpFW0U
XPbXg+F/GcBFjzGxNCPOIuvjQXgPjG1Va9ugd16nU0LtqnlRLHG2vppcQ3bI/gIAAP//AwCtLU45
0wIAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 89ed0cf40ebc741a-MIA
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 06 Jul 2024 05:03:50 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '186'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '16000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '15999969'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5da164d15ccb331864aeb5d3562969aa
status:
code: 200
message: OK
version: 1
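Two details of this cassette are easy to miss. First, the second request pins tool_choice to the ScoreOutput function, so the model is forced to return arguments matching the declared JSON schema rather than free text; second, because that request advertised accept-encoding: gzip, deflate, the recorded response body is gzip-compressed and stored under a !!binary tag. A sketch for inspecting such a body offline; the file path matches this cassette, and the decoded tool_calls shape is the expected one for a forced function call, not something verified from the base64 here:

import gzip
import json

import yaml  # PyYAML; safe_load decodes !!binary tags into bytes

with open("tests/cassettes/test_custom_converter_cls.yaml") as f:
    cassette = yaml.safe_load(f)

# interactions[1] is the second, tool-forced request/response pair above.
body = cassette["interactions"][1]["response"]["body"]["string"]
payload = json.loads(gzip.decompress(body))  # strip the gzip layer noted in Content-Encoding
tool_call = payload["choices"][0]["message"]["tool_calls"][0]
print(tool_call["function"]["arguments"])  # expected: '{"score": 4}' per the ScoreOutput schema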
@@ -0,0 +1,193 @@
interactions:
- request:
body: '{"messages": [{"content": "You are Researcher. You''re an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.\nYour personal goal is: Make the best research and analysis on content about AI and AI agentsTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent Task: Look at the available data nd give me a sense on the total number of sales.\n\nThis is the expect criteria for your final answer: The total number of sales as an integer \n you MUST return the actual complete content as the final answer, not a summary.\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1178'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.34.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.34.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" total"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" number"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" sales"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"150"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"0"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 89c9d0107c8abd30-ATL
Connection:
- keep-alive
Content-Type:
- text/event-stream; charset=utf-8
Date:
- Mon, 01 Jul 2024 22:25:35 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=xIvvDveyc7bpEywphx5N4EscKoZiGAT_yDVu3aFAWZ4-1719872735-1.0.1.1-ZOUYc2kEes8fxrMFgGdVppzOh9nPbl4y1Syv73ORt38FBXePWFSTJrFZCZRU.zob6ks9nWzr2vBIZbBQdAOOGQ; path=/; expires=Mon, 01-Jul-24 22:55:35 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- _cfuvid=aG1BGRRkNAyxmctM98.DLqSNJ2Cx_OQYsMRQbd03.bo-1719872735091-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '80'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '16000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '15999725'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_c90015b7584729268f48a8b33ff7c5ea
status:
code: 200
message: OK
version: 1
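One pattern worth noting across all of these recordings: every response carries OpenAI's rate-limit headers, and the reset values come back as compact durations (6ms, 10ms, 0s, 1ms above) rather than plain numbers. A small helper for turning them into seconds, e.g. to drive a backoff sleep; this is a sketch, not something crewAI ships:

import re

_UNITS = {"ms": 1e-3, "s": 1.0, "m": 60.0, "h": 3600.0}

def parse_reset(value: str) -> float:
    """Convert durations like '6ms', '0s', or '1m30s' into seconds."""
    return sum(float(amount) * _UNITS[unit]
               for amount, unit in re.findall(r"([\d.]+)(ms|s|m|h)", value))

print(parse_reset("6ms"))    # 0.006
print(parse_reset("0s"))     # 0.0
print(parse_reset("1m30s"))  # 90.0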
235903 tests/cassettes/test_hierarchical_async_task_execution_completion.yaml Normal file
File diff suppressed because it is too large
@@ -46,64 +46,64 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
string: 'data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9hEBF5KduXR80dsfVx2VivLMxOk4w","object":"chat.completion.chunk","created":1720089641,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: {"id":"chatcmpl-9hry2om1JBkreHpDHFbfD2YDtg2oA","object":"chat.completion.chunk","created":1720242582,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
@@ -114,20 +114,20 @@ interactions:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89de7fa17ed72878-MIA
      - 89ed158b8bf0a566-MIA
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Thu, 04 Jul 2024 10:40:41 GMT
      - Sat, 06 Jul 2024 05:09:42 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=m9ZbE3d_dSMsckOHqfK8mMIzKxCRI4vqsIelidShwVc-1720089641-1.0.1.1-xigQSnpNWopswY4gNuGCIgc2MR64bcUc6bpFwdeThTINo0jBkROlwHpIGyjOBQo3goJboqk_kUa_XZby0or19g;
        path=/; expires=Thu, 04-Jul-24 11:10:41 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - __cf_bm=5C3MG9ni0I5bZoHGzfXZq16obGaD1INR3_.wX4CRPAk-1720242582-1.0.1.1-fZiD6L1FdBiC0gqcmBK9_IaHhbHPQi4z04fxYQtoDc9KbYqPvxm_sxP_RkuZX_AyPkHgu85IRq9E6MUAZJGzwQ;
        path=/; expires=Sat, 06-Jul-24 05:39:42 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=NgRTBkQl5NRUhXSdkH3Y7qNaA.KrG7PvxiuoOp9ip8w-1720089641502-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      - _cfuvid=YP7Z3XnHPKQDU2nOhrLzkxr8InOv42HLWchJd1ogneQ-1720242582534-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
@@ -136,7 +136,7 @@ interactions:
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '109'
      - '90'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
@@ -154,7 +154,7 @@ interactions:
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_2d7ac1e1ca6d58559a236046b682021e
      - req_36d283adbca77945609f0da658047ba0
    status:
      code: 200
      message: OK
@@ -178,8 +178,8 @@ interactions:
      content-type:
      - application/json
      cookie:
      - __cf_bm=m9ZbE3d_dSMsckOHqfK8mMIzKxCRI4vqsIelidShwVc-1720089641-1.0.1.1-xigQSnpNWopswY4gNuGCIgc2MR64bcUc6bpFwdeThTINo0jBkROlwHpIGyjOBQo3goJboqk_kUa_XZby0or19g;
        _cfuvid=NgRTBkQl5NRUhXSdkH3Y7qNaA.KrG7PvxiuoOp9ip8w-1720089641502-0.0.1.1-604800000
      - __cf_bm=5C3MG9ni0I5bZoHGzfXZq16obGaD1INR3_.wX4CRPAk-1720242582-1.0.1.1-fZiD6L1FdBiC0gqcmBK9_IaHhbHPQi4z04fxYQtoDc9KbYqPvxm_sxP_RkuZX_AyPkHgu85IRq9E6MUAZJGzwQ;
        _cfuvid=YP7Z3XnHPKQDU2nOhrLzkxr8InOv42HLWchJd1ogneQ-1720242582534-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
@@ -203,19 +203,19 @@ interactions:
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA2xS32vbMBB+918h7jkuTuwktd/WkcCgI6zdRmEtRlVkR5usU6UzbQj534scN3bD
        /CCO++77wZ0PEWOgtlAwEDtOorE6znermzXiPr+xv17vVtn++636PcNNmrw8/IRJYODzXynog3Ul
        sLFakkJzgoWTnGRQnS5nSXKdL7JpBzS4lTrQaktxhvEsmWVxMo+naU/coRLSQ8H+RIwxdujeENFs
        5RsULJl8dBrpPa8lFOchxsChDh3g3itP3BBMBlCgIWlCatNqPQIIUZeCaz0Yn77DqB72xLUuza3W
        i1SuV8vNw2L18m2dp/buy4+vI7+T9N52garWiPN+Rvi5X1yYMQaGNx33XqCTm5ZsSxd0xoC7um2k
        oRAdDo/gw/AjFNkRPo0eo//VT311PK9VY20dPvuLLUGljPK70knuu7TgCe3JIsg9dedrP10ErMPG
        Ukn4T5ogeN1fD4b/ZQDnPUZIXI8486iPB37vSTZlpUwtnXWqOyVUthQyWeZpmiYVRMfoHQAA//8D
        ADLpRvfTAgAA
        H4sIAAAAAAAAA2xS30/bMBB+z19h3XMzhbShJW8wiW1MGogixDRQ5DpOanB8ln1hK1X/d+Q0NKFa
        HqzTfff90F22EWOgSsgZiDUn0Vgdn63dJl3d3rSv5xuf/b7Di/PLq/JUVYub5wwmgYGrZynog/VF
        YGO1JIVmDwsnOcmgejJPk3SWZou0AxospQ602lI8wziAcZLFJ9OeuEYlpIec/YkYY2zbvSGiKeU/
        yFky+eg00nteS8gPQ4yBQx06wL1XnrghmAygQEPShNSm1XoEEKIuBNd6MN5/21E97IlrXdxePlzN
        /s6/rpZv3x+WF9P7nz++vdz/8iO/vfTGdoGq1ojDfkb4oZ8fmTEGhjcddynQyeuWbEtHdMaAu7pt
        pKEQHbaP4MPwI+SzHXwa3UX/q5/6andYq8baOlz5oy1BpYzy68JJ7ru04Ant3iLIPXXnaz9dBKzD
        xlJB+CJNEFz014PhfxnArMcIiesRJ4v6eOA3nmRTVMrU0lmnulNCZYsym59Ok3l1lkC0i94BAAD/
        /wMAylx2sdMCAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89de7fa4e8742878-MIA
      - 89ed158dee46a566-MIA
      Connection:
      - keep-alive
      Content-Encoding:
@@ -223,7 +223,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
      - Thu, 04 Jul 2024 10:40:42 GMT
      - Sat, 06 Jul 2024 05:09:42 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
@@ -233,7 +233,7 @@ interactions:
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '209'
      - '144'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
@@ -251,7 +251,7 @@ interactions:
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_a66f76bc016b3f8752bac8c393e60578
      - req_990566332b9b1851c581486c0a4da0e6
    status:
      code: 200
      message: OK
6244 tests/cassettes/test_sequential_async_task_execution_completion.yaml (Normal file)
File diff suppressed because it is too large

333 tests/cassettes/test_single_task_with_async_execution.yaml (Normal file)
@@ -0,0 +1,333 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are Researcher. You''re an expert researcher,
      specialized in technology, software engineering, AI and startups. You work as
      a freelancer and is now working on doing research and analysis for a new customer.\nYour
      personal goal is: Make the best research and analysis on content about AI and
      AI agentsTo give my best complete final answer to the task use the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete
      final answer to the task.\nYour final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!\nCurrent Task: Generate a list of 5 interesting ideas
      to explore for an article, where each bulletpoint is under 15 words.\n\nThis
      is the expect criteria for your final answer: Bullet point list of 5 important
      events. No additional commentary. \n you MUST return the actual complete content
      as the final answer, not a summary.\n\nBegin! This is VERY important to you,
      use the tools available and give your best Final Answer, your job depends on
      it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"],
      "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '1237'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" impact"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" agents"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" on"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" remote"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" work"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" productivity"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" Ethical"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" considerations"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-driven"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" decision"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-making"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" agents"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" are"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" transforming"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" customer"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" service"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" role"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" personalized"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" learning"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" experiences"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" advancements"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" healthcare"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" diagnostics"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
        data: [DONE]

        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 897653f3e8ba7ba2-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Fri, 21 Jun 2024 19:15:33 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=9ch02HraQXiYJx8jBtYzKXOBjm4nToP.1sBISDFt9Gc-1718997333-1.0.1.1-Ykz1rbMzc2Zo8VV5rBwixPedTuO8s_38psrpuLCSy2B.YIyCCXWMGI_JT5WGQVp2gacOcxjWMSVhOOY85gf9QQ;
        path=/; expires=Fri, 21-Jun-24 19:45:33 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=0srdhmUvYEBaQ2xn7BzySIPRoIiEPWzmvngtQRdnpUY-1718997333518-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '165'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '12000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '11999712'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - 92f00e3ecc754086e0ddf2d998f6f671
    status:
      code: 200
      message: OK
version: 1
1097 tests/cassettes/test_three_task_with_async_execution.yaml (Normal file)
File diff suppressed because it is too large
@@ -0,0 +1,335 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are Friendly Neighbor. You are the friendly
      neighbor\nYour personal goal is: Make everyone feel welcome\nYou ONLY have access
      to the following tools, and should NEVER make up tools that are not listed here:\n\nDecide
      Greetings() -> str - Decide Greetings() - Decide what is the appropriate greeting
      to use\n\nUse the following format:\n\nThought: you should always think about
      what to do\nAction: the action to take, only one name of [Decide Greetings],
      just the name, exactly as it''s written.\nAction Input: the input to the action,
      just a simple python dictionary, enclosed in curly braces, using \" to wrap
      keys and values.\nObservation: the result of the action\n\nOnce all necessary
      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
      the final answer to the original input question\n\nCurrent Task: Say an appropriate
      greeting.\n\nThis is the expect criteria for your final answer: The greeting.
      \n you MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '1289'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.35.10
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.35.10
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.9
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" need"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" decide"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" on"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" an"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" appropriate"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" greeting"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Decide"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Greetings"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Input"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" {}\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWRAEA0akLHaVsdYQP1dYZ73QJC","object":"chat.completion.chunk","created":1720137083,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
        data: [DONE]

        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89e305e3c8e382f5-GIG
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Thu, 04 Jul 2024 23:51:24 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=y7BtDW9RWNaYoBExulKsMw50ppqr1itieWbcStDWqVc-1720137084-1.0.1.1-EYCEQ9jOimP45.FgXjdzWftUrV1HHm49W4wbcxFhbrj2DVC1LnMbz9.l.c._AqBRgFAE3xVolosvjmoFDAMPYQ;
        path=/; expires=Fri, 05-Jul-24 00:21:24 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=pZBoWQ1_gTeUh2oe6ta.S2mxWtdaHvAtn6m2HszLdwk-1720137084219-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '335'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999700'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - req_b3f7e3c47df2641d6bef704ef3ae8a0f
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"content": "You are Friendly Neighbor. You are the friendly
      neighbor\nYour personal goal is: Make everyone feel welcome\nYou ONLY have access
      to the following tools, and should NEVER make up tools that are not listed here:\n\nDecide
      Greetings() -> str - Decide Greetings() - Decide what is the appropriate greeting
      to use\n\nUse the following format:\n\nThought: you should always think about
      what to do\nAction: the action to take, only one name of [Decide Greetings],
      just the name, exactly as it''s written.\nAction Input: the input to the action,
      just a simple python dictionary, enclosed in curly braces, using \" to wrap
      keys and values.\nObservation: the result of the action\n\nOnce all necessary
      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
      the final answer to the original input question\n\nCurrent Task: Say an appropriate
      greeting.\n\nThis is the expect criteria for your final answer: The greeting.
      \n you MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:\nI need to decide on an appropriate
      greeting.\n\nAction: Decide Greetings\nAction Input: {}\n\nObservation: Howdy!\n",
      "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"], "stream":
      true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '1404'
      content-type:
      - application/json
      cookie:
      - __cf_bm=y7BtDW9RWNaYoBExulKsMw50ppqr1itieWbcStDWqVc-1720137084-1.0.1.1-EYCEQ9jOimP45.FgXjdzWftUrV1HHm49W4wbcxFhbrj2DVC1LnMbz9.l.c._AqBRgFAE3xVolosvjmoFDAMPYQ;
        _cfuvid=pZBoWQ1_gTeUh2oe6ta.S2mxWtdaHvAtn6m2HszLdwk-1720137084219-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.35.10
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.35.10
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.9
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" know"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" the"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" final"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
        data: {"id":"chatcmpl-9hQWSD5B35ANI9JLmbxUdPECfNd43","object":"chat.completion.chunk","created":1720137084,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
        data: [DONE]

        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89e305ea4abc82f5-GIG
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Thu, 04 Jul 2024 23:51:24 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '91'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999673'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - req_10032db16fa190e8435947a6aaa700ff
    status:
      code: 200
      message: OK
version: 1
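The test changes below replay recordings like the ones above through `@pytest.mark.vcr(filter_headers=["authorization"])`: the VCR plugin serves matching HTTP requests from the cassette YAML instead of calling api.openai.com, and drops the Authorization header when recording. A minimal sketch of that pattern, assuming a cassette recorded under the test's name in tests/cassettes/ (the test name and agent fields here are placeholders, not code from this diff):

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task


@pytest.mark.vcr(filter_headers=["authorization"])
def test_replayed_greeting():
    greeter = Agent(
        role="Friendly Neighbor",
        goal="Make everyone feel welcome",
        backstory="You are the friendly neighbor",
    )
    task = Task(
        description="Say an appropriate greeting.",
        expected_output="The greeting.",
        agent=greeter,
    )
    crew = Crew(agents=[greeter], tasks=[task])
    # All HTTP traffic is served from the recorded YAML, so the result is
    # deterministic and no API key is needed.
    result = crew.kickoff()
    assert "Howdy" in str(result)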
@@ -1,6 +1,7 @@
"""Test Agent creation and execution basic functionality."""

import json
from concurrent.futures import Future
from unittest import mock
from unittest.mock import patch

@@ -10,9 +11,11 @@ import pytest
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.utilities import Logger, RPMController

ceo = Agent(
@@ -136,11 +139,57 @@ def test_crew_creation():
        tasks=tasks,
    )

    assert (
        crew.kickoff()
        == "1. **The Rise of AI in Healthcare**: The convergence of AI and healthcare is a promising frontier, offering unprecedented opportunities for disease diagnosis and patient outcome prediction. AI's potential to revolutionize healthcare lies in its capacity to synthesize vast amounts of data, generating precise and efficient results. This technological breakthrough, however, is not just about improving accuracy and efficiency; it's about saving lives. As we stand on the precipice of this transformative era, we must prepare for the complex challenges and ethical questions it poses, while embracing its ability to reshape healthcare as we know it.\n\n2. **Ethical Implications of AI**: As AI intertwines with our daily lives, it presents a complex web of ethical dilemmas. This fusion of technology, philosophy, and ethics is not merely academically intriguing but profoundly impacts the fabric of our society. The questions raised range from decision-making transparency to accountability, and from privacy to potential biases. As we navigate this ethical labyrinth, it is crucial to establish robust frameworks and regulations to ensure that AI serves humanity, and not the other way around.\n\n3. **AI and Data Privacy**: The rise of AI brings with it an insatiable appetite for data, spawning new debates around privacy rights. Balancing the potential benefits of AI with the right to privacy is a unique challenge that intersects technology, law, and human rights. In an increasingly digital world, where personal information forms the backbone of many services, we must grapple with these issues. It's time to redefine the concept of privacy and devise innovative solutions that ensure our digital footprints are not abused.\n\n4. **AI in Job Market**: The discourse around AI's impact on employment is a narrative of contrast, a tale of displacement and creation. On one hand, AI threatens to automate a multitude of jobs, on the other, it promises to create new roles that we cannot yet imagine. This intersection of technology, economics, and labor rights is a critical dialogue that will shape our future. As we stand at this crossroads, we must not only brace ourselves for the changes but also seize the opportunities that this technological wave brings.\n\n5. **Future of AI Agents**: The evolution of AI agents signifies a leap towards a future where AI is not just a tool, but a partner. These sophisticated AI agents, employed in customer service to personal assistants, are redefining our interactions with technology. As we gaze into the future of AI agents, we see a landscape of possibilities and challenges. This journey will be about harnessing the potential of AI agents while navigating the issues of trust, dependence, and ethical use."
    result = crew.kickoff()

    expected_string_output = "1. **The Rise of AI in Healthcare**: The convergence of AI and healthcare is a promising frontier, offering unprecedented opportunities for disease diagnosis and patient outcome prediction. AI's potential to revolutionize healthcare lies in its capacity to synthesize vast amounts of data, generating precise and efficient results. This technological breakthrough, however, is not just about improving accuracy and efficiency; it's about saving lives. As we stand on the precipice of this transformative era, we must prepare for the complex challenges and ethical questions it poses, while embracing its ability to reshape healthcare as we know it.\n\n2. **Ethical Implications of AI**: As AI intertwines with our daily lives, it presents a complex web of ethical dilemmas. This fusion of technology, philosophy, and ethics is not merely academically intriguing but profoundly impacts the fabric of our society. The questions raised range from decision-making transparency to accountability, and from privacy to potential biases. As we navigate this ethical labyrinth, it is crucial to establish robust frameworks and regulations to ensure that AI serves humanity, and not the other way around.\n\n3. **AI and Data Privacy**: The rise of AI brings with it an insatiable appetite for data, spawning new debates around privacy rights. Balancing the potential benefits of AI with the right to privacy is a unique challenge that intersects technology, law, and human rights. In an increasingly digital world, where personal information forms the backbone of many services, we must grapple with these issues. It's time to redefine the concept of privacy and devise innovative solutions that ensure our digital footprints are not abused.\n\n4. **AI in Job Market**: The discourse around AI's impact on employment is a narrative of contrast, a tale of displacement and creation. On one hand, AI threatens to automate a multitude of jobs, on the other, it promises to create new roles that we cannot yet imagine. This intersection of technology, economics, and labor rights is a critical dialogue that will shape our future. As we stand at this crossroads, we must not only brace ourselves for the changes but also seize the opportunities that this technological wave brings.\n\n5. **Future of AI Agents**: The evolution of AI agents signifies a leap towards a future where AI is not just a tool, but a partner. These sophisticated AI agents, employed in customer service to personal assistants, are redefining our interactions with technology. As we gaze into the future of AI agents, we see a landscape of possibilities and challenges. This journey will be about harnessing the potential of AI agents while navigating the issues of trust, dependence, and ethical use."

    assert str(result) == expected_string_output
    assert result.raw_output() == expected_string_output
    assert isinstance(result, CrewOutput)
    assert len(result.tasks_output) == len(tasks)
    assert result.result() == [expected_string_output]
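The hunk above changes the return value of `crew.kickoff()` from a plain string to a `CrewOutput`. Inferred from these assertions alone (the real class imported from crewai.crews.crew_output may expose more), the surface the tests rely on is:

# Sketch of the CrewOutput surface exercised above, inferred from this diff only.
result = crew.kickoff()

str(result)          # __str__ still yields the raw final output
result.raw_output()  # the same string, via an explicit accessor
result.tasks_output  # one TaskOutput per task the crew ran
result.result()      # the task outputs collected as a list of strings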
@pytest.mark.vcr(filter_headers=["authorization"])
def test_sync_task_execution():
    from unittest.mock import patch

    tasks = [
        Task(
            description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
            expected_output="Bullet point list of 5 important events.",
            agent=researcher,
        ),
        Task(
            description="Write an amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
            expected_output="A 4 paragraph article about AI.",
            agent=writer,
        ),
    ]

    crew = Crew(
        agents=[researcher, writer],
        process=Process.sequential,
        tasks=tasks,
    )

    mock_task_output = TaskOutput(
        description="Mock description", raw_output="mocked output", agent="mocked agent"
    )

    # Because we are mocking execute_sync, we never hit the underlying _execute_core
    # which sets the output attribute of the task
    for task in tasks:
        task.output = mock_task_output

    with patch.object(
        Task, "execute_sync", return_value=mock_task_output
    ) as mock_execute_sync:
        crew.kickoff()

        # Assert that execute_sync was called for each task
        assert mock_execute_sync.call_count == len(tasks)
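Because `execute_sync` is patched out, `_execute_core` never runs, which is why the test assigns `task.output` by hand first. A hypothetical async counterpart of the same pattern, going only by the new `from concurrent.futures import Future` import at the top of this file (an `execute_async` method returning a `Future` is an assumption here, not confirmed API):

# Hypothetical sketch, reusing tasks/crew/mock_task_output from the test above.
from concurrent.futures import Future
from unittest.mock import patch

future = Future()
future.set_result(mock_task_output)  # resolve immediately for the test

# "execute_async" mirrors the execute_sync patch above and assumes such a
# method exists on Task; patch.object would fail if it does not.
with patch.object(Task, "execute_async", return_value=future) as mock_execute_async:
    crew.kickoff()
    assert mock_execute_async.call_count == len(tasks)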
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_hierarchical_process():
|
||||
@@ -157,9 +206,11 @@ def test_hierarchical_process():
|
||||
manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
|
||||
tasks=[task],
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
|
||||
assert (
|
||||
result
|
||||
result.raw_output()
|
||||
== "1. 'Demystifying AI: An in-depth exploration of Artificial Intelligence for the layperson' - In this piece, we will unravel the enigma of AI, simplifying its complexities into digestible information for the everyday individual. By using relatable examples and analogies, we will journey through the neural networks and machine learning algorithms that define AI, without the jargon and convoluted explanations that often accompany such topics.\n\n2. 'The Role of AI in Startups: A Game Changer?' - Startups today are harnessing the power of AI to revolutionize their businesses. This article will delve into how AI, as an innovative force, is shaping the startup ecosystem, transforming everything from customer service to product development. We'll explore real-life case studies of startups that have leveraged AI to accelerate their growth and disrupt their respective industries.\n\n3. 'AI and Ethics: Navigating the Complex Landscape' - AI brings with it not just technological advancements, but ethical dilemmas as well. This article will engage readers in a thought-provoking discussion on the ethical implications of AI, exploring issues like bias in algorithms, privacy concerns, job displacement, and the moral responsibility of AI developers. We will also discuss potential solutions and frameworks to address these challenges.\n\n4. 'Unveiling the AI Agents: The Future of Customer Service' - AI agents are poised to reshape the customer service landscape, offering businesses the ability to provide round-the-clock support and personalized experiences. In this article, we'll dive deep into the world of AI agents, examining how they work, their benefits and limitations, and how they're set to redefine customer interactions in the digital age.\n\n5. 'From Science Fiction to Reality: AI in Everyday Life' - AI, once a concept limited to the realm of sci-fi, has now permeated our daily lives. This article will highlight the ubiquitous presence of AI, from voice assistants and recommendation algorithms, to autonomous vehicles and smart homes. We'll explore how AI, in its various forms, is transforming our everyday experiences, making the future seem a lot closer than we imagined."
)

@@ -194,8 +245,10 @@ def test_crew_with_delegating_agents():
tasks=tasks,
)

result = crew.kickoff()

assert (
crew.kickoff()
result.raw_output()
== "AI Agents, simply put, are intelligent systems that can perceive their environment and take actions to reach specific goals. Imagine them as digital assistants that can learn, adapt and make decisions. They operate in the realms of software or hardware, like a chatbot on a website or a self-driving car. The key to their intelligence is their ability to learn from their experiences, making them better at their tasks over time. In today's interconnected world, AI agents are transforming our lives. They enhance customer service experiences, streamline business processes, and even predict trends in data. Vehicles equipped with AI agents are making transportation safer. In healthcare, AI agents are helping to diagnose diseases, personalizing treatment plans, and monitoring patient health. As we embrace the digital era, these AI agents are not just important, they're becoming indispensable, shaping a future where technology works intuitively and intelligently to meet our needs."
)

@@ -359,43 +412,6 @@ def test_api_calls_throttling(capsys):
moveon.assert_called()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_full_output():
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
allow_delegation=False,
verbose=True,
)

task1 = Task(
description="just say hi!",
expected_output="your greeting",
agent=agent,
)
task2 = Task(
description="just say hello!",
expected_output="your greeting",
agent=agent,
)

crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)

result = crew.kickoff()

assert result == {
"final_output": "Hello!",
"tasks_outputs": [task1.output, task2.output],
"usage_metrics": {
"total_tokens": 348,
"prompt_tokens": 314,
"completion_tokens": 34,
"successful_requests": 2,
},
}


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_kickoff_for_each_full_ouput():
inputs = [
@@ -435,9 +451,186 @@ def test_crew_kickoff_for_each_full_ouput():
assert result["usage_metrics"][key] > 0


def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
allow_delegation=False,
verbose=True,
)

task = Task(
description="just say hi!",
expected_output="your greeting",
agent=agent,
)

Crew(agents=[agent], tasks=[task], verbose=2)

assert agent._rpm_controller is None


"""
|
||||
Future tests:
|
||||
TODO: 1 async task, 1 sync task. Make sure sync task waits for async to finish before starting.[]
|
||||
TODO: 3 async tasks, 1 sync task. Make sure sync task waits for async to finish before starting.
|
||||
TODO: 1 sync task, 1 async task. Make sure we wait for result from async before finishing crew.
|
||||
|
||||
TODO: 3 async tasks, 1 sync task. Make sure context from all 3 async tasks is passed to sync task.
|
||||
TODO: 3 async tasks, 1 sync task. Pass in context from only 1 async task to sync task.
|
||||
|
||||
TODO: Test pydantic output of CrewOutput and test type in CrewOutput result
|
||||
TODO: Test json output of CrewOutput and test type in CrewOutput result
|
||||
|
||||
TODO: TEST THE SAME THING BUT WITH HIERARCHICAL PROCESS
|
||||
"""
|
||||
|
||||
|
||||
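# A possible shape for the first TODO above (a hypothetical sketch, not yet in
# the suite). It assumes, as the mocks elsewhere in this file do, that
# Task.execute_async returns a concurrent.futures.Future whose result() blocks
# until the async task finishes; checking that result() was collected is the
# weakest useful ordering check, not a full proof of sequencing.
#
# def test_sync_task_waits_for_async_task():
#     from concurrent.futures import Future
#     from unittest.mock import MagicMock, patch
#
#     async_task = Task(
#         description="Async research task.",
#         expected_output="A short list.",
#         agent=researcher,
#         async_execution=True,
#     )
#     sync_task = Task(
#         description="Sync writing task.",
#         expected_output="A short paragraph.",
#         agent=writer,
#         context=[async_task],
#     )
#     crew = Crew(agents=[researcher, writer], tasks=[async_task, sync_task])
#
#     mock_output = TaskOutput(
#         description="Mock description", raw_output="ok", agent="mocked agent"
#     )
#     async_task.output = sync_task.output = mock_output
#     mock_future = MagicMock(spec=Future)
#     mock_future.result.return_value = mock_output
#
#     with patch.object(Task, "execute_async", return_value=mock_future), \
#         patch.object(Task, "execute_sync", return_value=mock_output) as mock_sync:
#         crew.kickoff()
#         # The async result must have been collected for the sync task to start.
#         mock_future.result.assert_called()
#         mock_sync.assert_called_once()

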
@pytest.mark.vcr(filter_headers=["authorization"])
def test_sequential_async_task_execution_completion():
list_ideas = Task(
description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 important events.",
agent=researcher,
async_execution=True,
)
list_important_history = Task(
description="Research the history of AI and give me the 5 most important events that shaped the technology.",
expected_output="Bullet point list of 5 important events.",
agent=researcher,
async_execution=True,
)
write_article = Task(
description="Write an article about the history of AI and its most important events.",
expected_output="A 4 paragraph article about AI.",
agent=writer,
context=[list_ideas, list_important_history],
)

sequential_crew = Crew(
agents=[researcher, writer],
process=Process.sequential,
tasks=[list_ideas, list_important_history, write_article],
)

sequential_result = sequential_crew.kickoff()
assert sequential_result.raw_output().startswith(
"**The Evolution of Artificial Intelligence: A Journey Through Milestones**"
)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_async_task_execution_completion():
from langchain_openai import ChatOpenAI

list_ideas = Task(
description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 important events.",
agent=researcher,
async_execution=True,
)
list_important_history = Task(
description="Research the history of AI and give me the 5 most important events that shaped the technology.",
expected_output="Bullet point list of 5 important events.",
agent=researcher,
async_execution=True,
)
write_article = Task(
description="Write an article about the history of AI and its most important events.",
expected_output="A 4 paragraph article about AI.",
agent=writer,
context=[list_ideas, list_important_history],
)

hierarchical_crew = Crew(
agents=[researcher, writer],
process=Process.hierarchical,
tasks=[list_ideas, list_important_history, write_article],
manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
)

hierarchical_result = hierarchical_crew.kickoff()

assert hierarchical_result.raw_output().startswith(
"The history of artificial intelligence (AI) is a fascinating journey"
)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_single_task_with_async_execution():

researcher_agent = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
allow_delegation=False,
)

list_ideas = Task(
description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
expected_output="Bullet point list of 5 important events. No additional commentary.",
agent=researcher_agent,
async_execution=True,
)

crew = Crew(
agents=[researcher_agent],
process=Process.sequential,
tasks=[list_ideas],
)

result = crew.kickoff()
print(result.raw_output())
assert result.raw_output().startswith(
"- The impact of AI agents on remote work productivity."
)


@pytest.mark.vcr(filter_headers=["authorization"])
def test_three_task_with_async_execution():
researcher_agent = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
allow_delegation=False,
)

bullet_list = Task(
description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
expected_output="Bullet point list of 5 important events. No additional commentary.",
agent=researcher_agent,
async_execution=True,
)
numbered_list = Task(
description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
expected_output="Numbered list of 5 important events. No additional commentary.",
agent=researcher_agent,
async_execution=True,
)
letter_list = Task(
description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
expected_output="Numbered list using [A), B), C)] list of 5 important events. No additional commentary.",
agent=researcher_agent,
async_execution=True,
)

crew = Crew(
agents=[researcher_agent],
process=Process.sequential,
tasks=[bullet_list, numbered_list, letter_list],
)

# Expected result is that we are going to concatenate the output from each async task.
# Because we add a buffer between each task, we should see a "----------" string
# after the first and second task in the final output.
result = crew.kickoff()
assert result.raw_output().count("\n\n----------\n\n") == 2

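# For reference, the aggregation described in the comment above amounts to
# joining the three async outputs with the divider (a sketch of the expected
# shape, not the actual Crew implementation):
#
# final = "\n\n----------\n\n".join(output.raw_output for output in async_outputs)
# # Joining three outputs produces exactly two dividers, hence the count of 2.

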
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_crew_async_kickoff_for_each_full_ouput():
async def test_crew_async_kickoff():
inputs = [
{"topic": "dog"},
{"topic": "cat"},
@@ -461,9 +654,6 @@ async def test_crew_async_kickoff_for_each_full_ouput():

assert len(results) == len(inputs)
for result in results:
assert "usage_metrics" in result
assert isinstance(result["usage_metrics"], dict)

# Assert that all required keys are in usage_metrics and their values are not None
for key in [
"total_tokens",
@@ -471,36 +661,14 @@ async def test_crew_async_kickoff_for_each_full_ouput():
"completion_tokens",
"successful_requests",
]:
assert key in result["usage_metrics"]
assert key in result.token_usage
# TODO: FIX THIS WHEN USAGE METRICS ARE RE-DONE
# assert result["usage_metrics"][key] > 0
# assert result.token_usage[key] > 0


def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory",
allow_delegation=False,
verbose=True,
)

task = Task(
description="just say hi!",
expected_output="your greeting",
agent=agent,
)

Crew(agents=[agent], tasks=[task], verbose=2)

assert agent._rpm_controller is None


def test_async_task_execution():
import threading
from unittest.mock import patch

from crewai.tasks.task_output import TaskOutput
@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_task_execution_call_count():
from unittest.mock import MagicMock, patch

list_ideas = Task(
description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
@@ -518,7 +686,6 @@ def test_async_task_execution():
description="Write an article about the history of AI and its most important events.",
expected_output="A 4 paragraph article about AI.",
agent=writer,
context=[list_ideas, list_important_history],
)

crew = Crew(
@@ -527,25 +694,30 @@ def test_async_task_execution():
tasks=[list_ideas, list_important_history, write_article],
)

with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
with patch.object(threading.Thread, "start") as start:
thread = threading.Thread(target=lambda: None, args=()).start()
start.return_value = thread
with patch.object(threading.Thread, "join", wraps=thread.join()) as join:
list_ideas.output = TaskOutput(
description="A 4 paragraph article about AI.",
raw_output="ok",
agent="writer",
)
list_important_history.output = TaskOutput(
description="A 4 paragraph article about AI.",
raw_output="ok",
agent="writer",
)
crew.kickoff()
start.assert_called()
join.assert_called()
# Create a valid TaskOutput instance to mock the return value
mock_task_output = TaskOutput(
description="Mock description", raw_output="mocked output", agent="mocked agent"
)

# Create a MagicMock Future instance
mock_future = MagicMock(spec=Future)
mock_future.result.return_value = mock_task_output

# Directly set the output attribute for each task
list_ideas.output = mock_task_output
list_important_history.output = mock_task_output
write_article.output = mock_task_output

with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync, patch.object(
Task, "execute_async", return_value=mock_future
) as mock_execute_async:

crew.kickoff()

assert mock_execute_async.call_count == 2
assert mock_execute_sync.call_count == 1


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -574,7 +746,9 @@ def test_kickoff_for_each_single_input():
results = crew.kickoff_for_each(inputs=inputs)

assert len(results) == 1
assert results == expected_outputs
print("RESULT:", results)
for result in results:
assert result == expected_outputs[0]


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -933,7 +1107,7 @@ def test_task_with_no_arguments():
crew = Crew(agents=[researcher], tasks=[task])

result = crew.kickoff()
assert result == "75"
assert result.raw_output() == "75"


def test_code_execution_flag_adds_code_tool_upon_kickoff():
@@ -954,9 +1128,12 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
)

crew = Crew(agents=[programmer], tasks=[task])
crew.kickoff()
assert len(programmer.tools) == 1
assert programmer.tools[0].__class__ == CodeInterpreterTool

with patch.object(Agent, "execute_task") as executor:
executor.return_value = "ok"
crew.kickoff()
assert len(programmer.tools) == 1
assert programmer.tools[0].__class__ == CodeInterpreterTool


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -977,10 +1154,9 @@ def test_delegation_is_not_enabled_if_there_are_only_one_agent():
)

crew = Crew(agents=[researcher], tasks=[task])
with patch.object(Task, "execute") as execute:
execute.return_value = "ok"
crew.kickoff()
assert task.tools == []

crew.kickoff()
assert task.tools == []


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -998,7 +1174,7 @@ def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():

result = crew.kickoff()
assert (
result
result.raw_output()
== "Howdy! I hope this message finds you well and brings a smile to your face. Have a fantastic day!"
)
assert len(agent.tools) == 0
@@ -1018,13 +1194,17 @@ def test_agent_usage_metrics_are_captured_for_sequential_process():
crew = Crew(agents=[agent], tasks=[task])

result = crew.kickoff()
assert result == "Howdy!"
assert crew.usage_metrics == {
"completion_tokens": 17,
"prompt_tokens": 158,
"successful_requests": 1,
"total_tokens": 175,
}
assert result.raw_output() == "Howdy!"

required_keys = [
"total_tokens",
"prompt_tokens",
"completion_tokens",
"successful_requests",
]
for key in required_keys:
assert key in crew.usage_metrics, f"Key '{key}' not found in usage_metrics"
assert crew.usage_metrics[key] > 0, f"Value for key '{key}' is zero"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1071,7 +1251,7 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
)

result = crew.kickoff()
assert result == '"Howdy!"'
assert result.raw_output() == '"Howdy!"'

assert crew.usage_metrics == {
"total_tokens": 1927,
@@ -1190,9 +1370,81 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
interpolate_task_inputs.assert_called()


def test_task_callback_on_crew():
def test_crew_does_not_interpolate_without_inputs():
from unittest.mock import patch

agent = Agent(
role="{topic} Researcher",
goal="Express hot takes on {topic}.",
backstory="You have a lot of experience with {topic}.",
)

task = Task(
description="Give me an analysis around {topic}.",
expected_output="{points} bullet points about {topic}.",
agent=agent,
)

crew = Crew(agents=[agent], tasks=[task])

with patch.object(Agent, "interpolate_inputs") as interpolate_agent_inputs:
with patch.object(Task, "interpolate_inputs") as interpolate_task_inputs:
crew.kickoff()
interpolate_agent_inputs.assert_not_called()
interpolate_task_inputs.assert_not_called()


# TODO: Ask @joao if we want to start throwing errors if inputs are not provided
# def test_crew_partial_inputs():
# agent = Agent(
# role="{topic} Researcher",
# goal="Express hot takes on {topic}.",
# backstory="You have a lot of experience with {topic}.",
# )

# task = Task(
# description="Give me an analysis around {topic}.",
# expected_output="{points} bullet points about {topic}.",
# )

# crew = Crew(agents=[agent], tasks=[task], inputs={"topic": "AI"})
# inputs = {"topic": "AI"}
# crew._interpolate_inputs(inputs=inputs) # Manual call for now

# assert crew.tasks[0].description == "Give me an analysis around AI."
# assert crew.tasks[0].expected_output == "{points} bullet points about AI."
# assert crew.agents[0].role == "AI Researcher"
# assert crew.agents[0].goal == "Express hot takes on AI."
# assert crew.agents[0].backstory == "You have a lot of experience with AI."


# TODO: If we do want to throw errors when inputs are missing, add in this test.
# def test_crew_invalid_inputs():
# agent = Agent(
# role="{topic} Researcher",
# goal="Express hot takes on {topic}.",
# backstory="You have a lot of experience with {topic}.",
# )

# task = Task(
# description="Give me an analysis around {topic}.",
# expected_output="{points} bullet points about {topic}.",
# )

# crew = Crew(agents=[agent], tasks=[task], inputs={"subject": "AI"})
# inputs = {"subject": "AI"}
# crew._interpolate_inputs(inputs=inputs) # Manual call for now

# assert crew.tasks[0].description == "Give me an analysis around {topic}."
# assert crew.tasks[0].expected_output == "{points} bullet points about {topic}."
# assert crew.agents[0].role == "{topic} Researcher"
# assert crew.agents[0].goal == "Express hot takes on {topic}."
# assert crew.agents[0].backstory == "You have a lot of experience with {topic}."


def test_task_callback_on_crew():
from unittest.mock import MagicMock, patch

researcher_agent = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
@@ -1207,17 +1459,23 @@ def test_task_callback_on_crew():
async_execution=True,
)

mock_callback = MagicMock()

crew = Crew(
agents=[researcher_agent],
process=Process.sequential,
tasks=[list_ideas],
task_callback=lambda: None,
task_callback=mock_callback,
)

with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
crew.kickoff()

assert list_ideas.callback is not None
mock_callback.assert_called_once()
args, _ = mock_callback.call_args
assert isinstance(args[0], TaskOutput)


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1289,7 +1547,7 @@ def test_tools_with_custom_caching():
input={"first_number": 2, "second_number": 6},
output=12,
)
assert result == "3"
assert result.raw_output() == "3"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1387,10 +1645,20 @@ def test_manager_agent():
tasks=[task],
)

with patch.object(Task, "execute") as execute:
mock_task_output = TaskOutput(
description="Mock description", raw_output="mocked output", agent="mocked agent"
)

# Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task
task.output = mock_task_output

with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
assert manager.allow_delegation is True
execute.assert_called()
mock_execute_sync.assert_called()


def test_manager_agent_in_agents_raises_exception():
@@ -1552,3 +1820,8 @@ def test__setup_for_training():

for agent in agents:
assert agent.allow_delegation is False


# TODO: TEST EXPORT OUTPUT TASK WITH PYDANTIC
# TODO: TEST EXPORT OUTPUT TASK WITH JSON
# TODO: TEST EXPORT OUTPUT TASK CALLBACK

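# A hypothetical sketch for the first export TODO above. It assumes a task
# configured with output_pydantic exposes the parsed model on its output (the
# attribute name used here is assumed, not confirmed by this diff):
#
# def test_export_output_with_pydantic():
#     class ScoreOutput(BaseModel):
#         score: int
#
#     task = Task(
#         description="Score the title from 1-5.",
#         expected_output="The score of the title.",
#         output_pydantic=ScoreOutput,
#         agent=scorer,
#     )
#     crew = Crew(agents=[scorer], tasks=[task])
#     crew.kickoff()
#     assert isinstance(task.output.exported_output, ScoreOutput)  # attribute assumed

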
@@ -9,6 +9,7 @@ from pydantic_core import ValidationError

from crewai import Agent, Crew, Process, Task
from crewai.tasks.task_output import TaskOutput
from crewai.utilities.converter import Converter


def test_task_tool_reflect_agent_tools():
@@ -80,7 +81,7 @@ def test_task_prompt_includes_expected_output():

with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
task.execute()
task.execute_sync()
execute.assert_called_once_with(task=task, context=None, tools=[])


@@ -103,7 +104,7 @@ def test_task_callback():

with patch.object(Agent, "execute_task") as execute:
execute.return_value = "ok"
task.execute()
task.execute_sync()
task_completed.assert_called_once_with(task.output)


@@ -126,7 +127,7 @@ def test_task_callback_returns_task_ouput():

with patch.object(Agent, "execute_task") as execute:
execute.return_value = "exported_ok"
task.execute()
task.execute_sync()
# Ensure the callback is called with a TaskOutput object serialized to JSON
task_completed.assert_called_once()
callback_data = task_completed.call_args[0][0]
@@ -161,7 +162,7 @@ def test_execute_with_agent():
)

with patch.object(Agent, "execute_task", return_value="ok") as execute:
task.execute(agent=researcher)
task.execute_sync(agent=researcher)
execute.assert_called_once_with(task=task, context=None, tools=[])


@@ -181,7 +182,7 @@ def test_async_execution():
)

with patch.object(Agent, "execute_task", return_value="ok") as execute:
task.execute(agent=researcher)
task.execute_async(agent=researcher)
execute.assert_called_once_with(task=task, context=None, tools=[])


@@ -393,6 +394,38 @@ def test_save_task_pydantic_output():
save_file.assert_called_once_with('{"score":4}')


@pytest.mark.vcr(filter_headers=["authorization"])
def test_custom_converter_cls():
class ScoreOutput(BaseModel):
score: int

class ScoreConverter(Converter):
pass

scorer = Agent(
role="Scorer",
goal="Score the title",
backstory="You're an expert scorer, specialized in scoring titles.",
allow_delegation=False,
)

task = Task(
description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
expected_output="The score of the title.",
output_pydantic=ScoreOutput,
converter_cls=ScoreConverter,
agent=scorer,
)

crew = Crew(agents=[scorer], tasks=[task])

with patch.object(
ScoreConverter, "to_pydantic", return_value=ScoreOutput(score=5)
) as mock_to_pydantic:
crew.kickoff()
mock_to_pydantic.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_increment_delegations_for_hierarchical_process():
from langchain_openai import ChatOpenAI
@@ -413,31 +446,29 @@ def test_increment_delegations_for_hierarchical_process():
agents=[scorer],
tasks=[task],
process=Process.hierarchical,
manager_llm=ChatOpenAI(model="gpt-4-0125-preview"),
manager_llm=ChatOpenAI(model="gpt-4o"),
)

with patch.object(Task, "increment_delegations") as increment_delegations:
increment_delegations.return_value = None
crew.kickoff()
increment_delegations.assert_called_once
increment_delegations.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_increment_delegations_for_sequential_process():
pass

manager = Agent(
role="Manager",
goal="Coordinate scoring processes",
backstory="You're great at delegating work about scoring.",
allow_delegation=False,
allow_delegation=True,
)

scorer = Agent(
role="Scorer",
goal="Score the title",
backstory="You're an expert scorer, specialized in scoring titles.",
allow_delegation=False,
allow_delegation=True,
)

task = Task(
@@ -455,7 +486,7 @@ def test_increment_delegations_for_sequential_process():
with patch.object(Task, "increment_delegations") as increment_delegations:
increment_delegations.return_value = None
crew.kickoff()
increment_delegations.assert_called_once
increment_delegations.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -490,7 +521,7 @@ def test_increment_tool_errors():
with patch.object(Task, "increment_tools_errors") as increment_tools_errors:
increment_tools_errors.return_value = None
crew.kickoff()
increment_tools_errors.assert_called_once
assert len(increment_tools_errors.mock_calls) == 3


def test_task_definition_based_on_dict():
@@ -525,3 +556,14 @@ def test_interpolate_inputs():
== "Give me a list of 5 interesting ideas about ML to explore for an article, what makes them unique and interesting."
)
assert task.expected_output == "Bullet point list of 5 interesting ideas about ML."


"""
TODO: TEST SYNC
- Verify return type
"""

"""
TODO: TEST ASYNC
- Verify return type
"""

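# A hypothetical sketch of the two return-type checks above, assuming (as the
# mocks elsewhere in this diff suggest) that execute_sync returns a TaskOutput
# and execute_async returns a concurrent.futures.Future:
#
# def test_execute_sync_returns_task_output():
#     with patch.object(Agent, "execute_task", return_value="ok"):
#         output = task.execute_sync(agent=researcher)
#     assert isinstance(output, TaskOutput)
#
# def test_execute_async_returns_future():
#     from concurrent.futures import Future
#     with patch.object(Agent, "execute_task", return_value="ok"):
#         future = task.execute_async(agent=researcher)
#     assert isinstance(future, Future)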