Mirror of https://github.com/crewAIInc/crewAI.git
synced 2025-12-24 08:18:31 +00:00
Compare commits
119 Commits
v0.5.5...bug/create
| SHA1 |
| :--- |
| 59a5f51fd7 |
| 375946c15a |
| 637bd885cf |
| 337afe228f |
| 4541835487 |
| 04d9603449 |
| 671a8d0180 |
| 3950878690 |
| eaac627600 |
| 35f8919e73 |
| cb5a528550 |
| 1f95d7b982 |
| 46971ee985 |
| e67009ee2e |
| 9d3da98251 |
| b94de6e947 |
| f8a1d4f414 |
| 7deb268de8 |
| 47b5cbd211 |
| a4e9b9ccfe |
| 99be4f5a61 |
| ba28ab1680 |
| e51b8aadae |
| 33354aa07e |
| 730b71fad8 |
| 364cf216a0 |
| 3cb48ac562 |
| ea65283023 |
| d2003cc32d |
| 1766e27337 |
| 442c324243 |
| 3134711240 |
| 546fc965f8 |
| 9ab45d9118 |
| b1ae86757b |
| 42eeec5897 |
| c12283bb16 |
| b856b21fc6 |
| 72a0d1edef |
| c0a0e01cf6 |
| 78bf008c36 |
| 5857c22daf |
| 5f73ba1371 |
| 4c09835abc |
| 0a025901c5 |
| 9768e4518f |
| 1f802ccb5a |
| e1306a8e6a |
| 997c906b5f |
| 2530196cf8 |
| 340bea3271 |
| 3df3bba756 |
| a9863fe670 |
| 7b49b4e985 |
| 577db88f8e |
| 01a2e650a4 |
| cd9f7931c9 |
| 2b04ae4e4a |
| cd0b82e794 |
| 0ddcffe601 |
| 712d106a44 |
| 34c5560cb0 |
| dcba1488a6 |
| 8e4b156f11 |
| ab98c3bd28 |
| 7f98a99e90 |
| 101b80c234 |
| 44598babcb |
| 51edfb4604 |
| 12d6fa1494 |
| 99a15ac2ae |
| 093a9c8174 |
| 464dfc4e67 |
| 1c7f9826b4 |
| e397a49c23 |
| 8c925237e7 |
| 0593d52b91 |
| 7b7d714109 |
| e9aa87f62b |
| 8f5d735b2f |
| e24f4867df |
| ef024ca106 |
| 4c519d9d98 |
| 94cb96b288 |
| 108a0d36b7 |
| efb097a76b |
| af03042852 |
| 21667bc7e1 |
| 19b6c15fff |
| 3ef502024d |
| e55cee7372 |
| b72eb838c2 |
| b21191dd55 |
| 76b17a8d04 |
| e97d1a0cf8 |
| c875d887b7 |
| 44d9cbca81 |
| 6e399101fd |
| e8e3617ba6 |
| 45fa30c007 |
| 15768d9c4d |
| a1fcaa398c |
| 871643d98d |
| 91659d6488 |
| 0076ea7bff |
| e79da7bc05 |
| 00206a62ab |
| d0b0a33be3 |
| 6ea21e95b6 |
| c226dafd0d |
| d4c21a23f4 |
| b76ae5b921 |
| b48e5af9a0 |
| d36c2a74cb |
| a1e0596450 |
| 596e243374 |
| 326ad08ba2 |
| f63d4edbb4 |
| 0057ed6786 |

.gitignore (vendored): 4 changes
````diff
@@ -5,4 +5,6 @@ dist/
 .env
 assets/*
 .idea
-test.py
+test/
+docs_crew/
+chroma.sqlite3
````

.pre-commit-config.yaml

````diff
@@ -1,11 +1,11 @@
 repos:
   - repo: https://github.com/psf/black-pre-commit-mirror
     rev: 23.12.1
     hooks:
       - id: black
         language_version: python3.11
         files: \.(py)$
         exclude: 'src/crewai/cli/templates/(crew|main)\.py'
   - repo: https://github.com/pycqa/isort
     rev: 5.13.2
````
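
For reference, activating a configuration like this locally uses the stock `pre-commit` CLI; these are the standard commands, offered as a hint rather than anything this diff prescribes:

```shell
pip install pre-commit
pre-commit install          # register the git hook defined by this config
pre-commit run --all-files  # run black and isort across the whole repo
```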

README.md: 62 changes
````diff
@@ -24,6 +24,7 @@
 - [Key Features](#key-features)
 - [Examples](#examples)
 - [Quick Tutorial](#quick-tutorial)
+- [Write Job Descriptions](#write-job-descriptions)
 - [Trip Planner](#trip-planner)
 - [Stock Analysis](#stock-analysis)
 - [Connecting Your Crew to a Model](#connecting-your-crew-to-a-model)
````
````diff
@@ -48,10 +49,10 @@ To get started with CrewAI, follow these simple steps:
 pip install crewai
 ```
 
-The example below also uses DuckDuckGo's Search. You can install it with `pip` too:
+If you also want to install `crewai-tools`, a package of tools that agents can use (it pulls in more dependencies), you can do so as shown below; the example that follows uses it:
 
 ```shell
-pip install duckduckgo-search
+pip install 'crewai[tools]'
 ```
````

### 2. Setting Up Your Crew

````diff
@@ -59,18 +60,18 @@ pip install duckduckgo-search
 ```python
 import os
 from crewai import Agent, Task, Crew, Process
+from crewai_tools import SerperDevTool
 
 os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
+os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
 
-# You can choose to use a local model through Ollama for example. See ./docs/how-to/llm-connections.md for more information.
-# from langchain_community.llms import Ollama
-# ollama_llm = Ollama(model="openhermes")
+# You can choose to use a local model through Ollama for example. See https://docs.crewai.com/how-to/LLM-Connections/ for more information.
 
-# Install duckduckgo-search for this example:
-# !pip install -U duckduckgo-search
+# os.environ["OPENAI_API_BASE"] = 'http://localhost:11434/v1'
+# os.environ["OPENAI_MODEL_NAME"] = 'openhermes'  # Adjust based on available model
+# os.environ["OPENAI_API_KEY"] = 'sk-111111111111111111111111111111111111111111111111'
 
-from langchain_community.tools import DuckDuckGoSearchRun
-search_tool = DuckDuckGoSearchRun()
+search_tool = SerperDevTool()
 
 # Define your agents with roles and goals
 researcher = Agent(
````
````diff
@@ -84,12 +85,12 @@ researcher = Agent(
   tools=[search_tool]
   # You can pass an optional llm attribute specifying what model you wanna use.
   # It can be a local model through Ollama / LM Studio or a remote
-  # model like OpenAI, Mistral, Anthropic or others (https://python.langchain.com/docs/integrations/llms/)
+  # model like OpenAI, Mistral, Anthropic or others (https://docs.crewai.com/how-to/LLM-Connections/)
   #
   # Examples:
-  # from langchain_community.llms import Ollama
-  # llm=ollama_llm # was defined above in the file
+  # import os
+  # os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
   #
   # OR
   #
   # from langchain_openai import ChatOpenAI
   # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
````
````diff
@@ -100,15 +101,14 @@ writer = Agent(
   backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
   You transform complex concepts into compelling narratives.""",
   verbose=True,
-  allow_delegation=True,
-  # (optional) llm=ollama_llm
+  allow_delegation=True
 )
 
 # Create tasks for your agents
 task1 = Task(
   description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
-  Identify key trends, breakthrough technologies, and potential industry impacts.
-  Your final answer MUST be a full analysis report""",
+  Identify key trends, breakthrough technologies, and potential industry impacts.""",
+  expected_output="Full analysis report in bullet points",
   agent=researcher
 )
````
````diff
@@ -116,8 +116,8 @@ task2 = Task(
   description="""Using the insights provided, develop an engaging blog
   post that highlights the most significant AI advancements.
   Your post should be informative yet accessible, catering to a tech-savvy audience.
-  Make it sound cool, avoid complex words so it doesn't sound like AI.
-  Your final answer MUST be the full blog post of at least 4 paragraphs.""",
+  Make it sound cool, avoid complex words so it doesn't sound like AI.""",
+  expected_output="Full blog post of at least 4 paragraphs",
   agent=writer
 )
````
````diff
@@ -143,7 +143,9 @@ In addition to the sequential process, you can use the hierarchical process, which
 - **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enhancing problem-solving efficiency.
 - **Flexible Task Management**: Define tasks with customizable tools and assign them to agents dynamically.
 - **Processes Driven**: Currently only supports `sequential` task execution and `hierarchical` processes, but more complex processes like consensual and autonomous are being worked on.
-- **Works with Open Source Models**: Run your crew using Open AI or open source models refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring you agents' connections to models, even ones running locally!
+- **Save output as file**: Save the output of individual tasks as a file, so you can use it later.
+- **Parse output as Pydantic or Json**: Parse the output of individual tasks as a Pydantic model or as a Json if you want to.
+- **Works with Open Source Models**: Run your crew using Open AI or open source models refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models, even ones running locally!
 
 ![CrewAI Mind Map](./docs/crewAI-mindmap.png "CrewAI Mind Map")
````
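
As a rough sketch of the two newly listed features, hedged on the `Task` parameters documented in the core-concepts pages further down this diff (`output_file`, `output_pydantic`):

```python
from pydantic import BaseModel
from crewai import Task

class Report(BaseModel):
    title: str
    body: str

report_task = Task(
    description="Summarize today's AI news",
    expected_output="A short structured report",
    agent=researcher,          # defined earlier in the README example
    output_file="report.md",   # save this task's output as a file
    output_pydantic=Report     # or parse the output into a Pydantic model
)
```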
````diff
@@ -160,6 +162,12 @@ You can test different real life examples of AI crews in the [crewAI-examples re
 
 [](https://www.youtube.com/watch?v=tnejrr-0a94 "CrewAI Tutorial")
 
+### Write Job Descriptions
+
+[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting) or watch a video below:
+
+[](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")
+
 ### Trip Planner
 
 [Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner) or watch a video below:
````
````diff
@@ -180,7 +188,7 @@ Please refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/)
 
 ## How CrewAI Compares
 
-- **Autogen**: While Autogen excels in creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.
+- **Autogen**: While Autogen does well at creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.
 
 - **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.
````
````diff
@@ -252,15 +260,27 @@ There is NO data being collected on the prompts, tasks descriptions, agents backstories
 
 Data collected includes:
 - Version of crewAI
+  - So we can understand how many users are using the latest version
 - Version of Python
+  - So we can decide on what versions to better support
 - General OS (e.g. number of CPUs, macOS/Windows/Linux)
+  - So we know what OS we should focus on and if we could build specific OS-related features
 - Number of agents and tasks in a crew
+  - So we make sure we are testing internally with similar use cases and educate people on the best practices
 - Crew Process being used
+  - Understand where we should focus our efforts
 - If Agents are using memory or allowing delegation
+  - Understand if we improved the features or maybe even drop them
 - If Tasks are being executed in parallel or sequentially
+  - Understand if we should focus more on parallel execution
 - Language model being used
+  - Improve support for the most used models
 - Roles of agents in a crew
+  - Understand high-level use cases so we can build better tools, integrations and examples about it
 - Tool names available
+  - Understand, out of the publicly available tools, which ones are being used the most so we can improve them
 
+Users can opt in to sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews.
````
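
A one-line sketch of the opt-in mentioned above (the `share_crew` attribute name is as stated; the agents and tasks are assumed from the earlier README example):

```python
from crewai import Crew

my_crew = Crew(
    agents=[researcher, writer],
    tasks=[task1, task2],
    share_crew=True  # opt in to sharing complete telemetry data
)
```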

## License

docs/core-concepts/Agents.md

````diff
@@ -10,29 +10,33 @@ description: What are crewAI Agents and how to use them.
     <li class='leading-3'>Perform tasks</li>
     <li class='leading-3'>Make decisions</li>
     <li class='leading-3'>Communicate with other agents</li>
   </ul>
   <br/>
 Think of an agent as a member of a team, with specific skills and a particular job to do. Agents can have different roles like 'Researcher', 'Writer', or 'Customer Support', each contributing to the overall goal of the crew.
 
 ## Agent Attributes
 
-| Attribute | Description |
-| :--- | :--- |
-| **Role** | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
-| **Goal** | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
-| **Backstory** | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
-| **Tools** | Set of capabilities or functions that the agent can use to perform tasks. Tools can be shared or exclusive to specific agents. |
-| **Max Iter** | The maximum number of iterations the agent can perform before being forced to give its best answer. |
-| **Max RPM** | The maximum number of requests per minute the agent can perform to avoid rate limits. |
-| **Verbose** | This allows you to actually see what is going on during the Crew execution. |
-| **Allow Delegation** | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. |
+| Attribute | Description |
+| :--- | :--- |
+| **Role** | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
+| **Goal** | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
+| **Backstory** | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
+| **LLM** *(optional)* | The language model used by the agent to process and generate text. It dynamically fetches the model name from the `OPENAI_MODEL_NAME` environment variable, defaulting to "gpt-4" if not specified. |
+| **Tools** *(optional)* | Set of capabilities or functions that the agent can use to perform tasks. Tools can be shared or exclusive to specific agents. It's an attribute that can be set during the initialization of an agent, with a default value of an empty list. |
+| **Function Calling LLM** *(optional)* | If passed, this agent will use this LLM to execute function calling for tools instead of relying on the main LLM output. |
+| **Max Iter** *(optional)* | The maximum number of iterations the agent can perform before being forced to give its best answer. Default is `15`. |
+| **Max RPM** *(optional)* | The maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified, with a default value of `None`. |
+| **Verbose** *(optional)* | Enables detailed logging of the agent's execution for debugging or monitoring purposes when set to True. Default is `False`. |
+| **Allow Delegation** *(optional)* | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `True`. |
+| **Step Callback** *(optional)* | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. |
+| **Memory** *(optional)* | Indicates whether the agent should have memory, with a default value of `False`. This impacts the agent's ability to remember past interactions. |
 
 ## Creating an Agent
 
 !!! note "Agent Interaction"
-    Agents can interact with each other using the CrewAI's built-in delegation and communication mechanisms.<br/>This allows for dynamic task management and problem-solving within the crew.
+    Agents can interact with each other using crewAI's built-in delegation and communication mechanisms. This allows for dynamic task management and problem-solving within the crew.
 
-To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example:
+To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example including all attributes:
 
 ```python
+# Example: Creating an agent with all attributes
````
````diff
@@ -46,11 +50,15 @@ agent = Agent(
         to the business.
         You're currently working on a project to analyze the
         performance of our marketing campaigns.""",
-  tools=[my_tool1, my_tool2],
-  max_iter=10,
-  max_rpm=10,
-  verbose=True,
-  allow_delegation=True
+  tools=[my_tool1, my_tool2],  # Optional, defaults to an empty list
+  llm=my_llm,  # Optional
+  function_calling_llm=my_llm,  # Optional
+  max_iter=15,  # Optional
+  max_rpm=None,  # Optional
+  verbose=True,  # Optional
+  allow_delegation=True,  # Optional
+  step_callback=my_intermediate_step_callback,  # Optional
+  memory=True  # Optional
 )
 ```
````
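
For illustration, `my_intermediate_step_callback` from the example above could be as simple as the sketch below; the exact payload passed to the callback is not documented here, so it just logs whatever it receives:

```python
def my_intermediate_step_callback(step_output):
    # Called after each step the agent takes; log it for inspection
    print(f"[agent step] {step_output}")
```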

docs/core-concepts/Collaboration.md

````diff
@@ -1,6 +1,6 @@
 ---
 title: How Agents Collaborate in CrewAI
-description: Exploring the dynamics of agent collaboration within the CrewAI framework.
+description: Exploring the dynamics of agent collaboration within the CrewAI framework, focusing on the newly integrated features for enhanced functionality.
 ---
 
 ## Collaboration Fundamentals
@@ -11,14 +11,28 @@ description: Exploring the dynamics of agent collaboration within the CrewAI framework
 - **Task Assistance**: Allows agents to seek help from peers with the required expertise for specific tasks.
 - **Resource Allocation**: Optimizes task execution through the efficient distribution and sharing of resources among agents.
 
+## Enhanced Attributes for Improved Collaboration
+The `Crew` class has been enriched with several attributes to support advanced functionality; a configuration sketch follows this list:
+
+- **Language Model Management (`manager_llm`, `function_calling_llm`)**: Manages language models for executing tasks and tools, facilitating sophisticated agent-tool interactions. Note that `manager_llm` is mandatory when using a hierarchical process, to ensure proper execution flow.
+- **Process Flow (`process`)**: Defines the execution logic (e.g., sequential, hierarchical) to streamline task distribution and execution.
+- **Verbose Logging (`verbose`)**: Offers detailed logging capabilities for monitoring and debugging purposes. It supports both integer and boolean types to indicate the verbosity level.
+- **Configuration (`config`)**: Allows extensive customization to tailor the crew's behavior to specific requirements.
+- **Rate Limiting (`max_rpm`)**: Ensures efficient utilization of resources by limiting requests per minute.
+- **Internationalization Support (`language`)**: Facilitates operation in multiple languages, enhancing global usability.
+- **Execution and Output Handling (`full_output`)**: Distinguishes between full and final outputs for nuanced control over task results.
+- **Callback and Telemetry (`step_callback`)**: Integrates callbacks for step-wise execution monitoring and telemetry for performance analytics.
+- **Crew Sharing (`share_crew`)**: Enables sharing of crew information with crewAI for continuous improvement and model training.
+- **Usage Metrics (`usage_metrics`)**: Stores all metrics for language model (LLM) usage during all tasks' execution, providing insights into operational efficiency and areas for improvement; you can check it after the crew execution.
````
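
As promised above, a minimal sketch wiring several of these attributes together; `my_agents` and `my_tasks` are placeholders assumed to be defined elsewhere, and the attribute names are as given in the list:

```python
from crewai import Crew, Process
from langchain_openai import ChatOpenAI

crew = Crew(
    agents=my_agents,
    tasks=my_tasks,
    process=Process.hierarchical,           # process flow
    manager_llm=ChatOpenAI(model="gpt-4"),  # mandatory for hierarchical crews
    max_rpm=30,                             # rate limiting
    language="en",                          # internationalization support
    full_output=True,                       # keep every task's output
    verbose=True
)

crew.kickoff()
print(crew.usage_metrics)  # inspect LLM usage after execution
```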
````diff
 ## Delegation: Dividing to Conquer
 Delegation enhances functionality by allowing agents to intelligently assign tasks or seek help, thereby amplifying the crew's overall capability.
 
 ## Implementing Collaboration and Delegation
-Setting up a crew involves defining the roles and capabilities of each agent. CrewAI seamlessly manages their interactions, ensuring efficient collaboration and delegation.
+Setting up a crew involves defining the roles and capabilities of each agent. CrewAI seamlessly manages their interactions, ensuring efficient collaboration and delegation, with enhanced customization and monitoring features to adapt to various operational needs.
 
 ## Example Scenario
-Imagine a crew with a researcher agent tasked with data gathering and a writer agent responsible for compiling reports. The writer can delegate research tasks or ask questions to the researcher, facilitating a seamless workflow.
+Consider a crew with a researcher agent tasked with data gathering and a writer agent responsible for compiling reports. The integration of advanced language model management and process flow attributes allows for more sophisticated interactions, such as the writer delegating complex research tasks to the researcher or querying specific information, thereby facilitating a seamless workflow.
 
 ## Conclusion
-Collaboration and delegation are pivotal, transforming individual AI agents into a coherent, intelligent crew capable of tackling complex tasks. CrewAI's framework not only simplifies these interactions but enhances their effectiveness, paving the way for sophisticated AI-driven solutions.
+The integration of advanced attributes and functionalities into the CrewAI framework significantly enriches the agent collaboration ecosystem. These enhancements not only simplify interactions but also offer unprecedented flexibility and control, paving the way for sophisticated AI-driven solutions capable of tackling complex tasks through intelligent collaboration and delegation.
````

docs/core-concepts/Crews.md

````diff
@@ -9,19 +9,23 @@ description: Understanding and utilizing crews in the crewAI framework.
 
 ## Crew Attributes
 
-| Attribute | Description |
-| :--- | :--- |
-| **Tasks** | A list of tasks assigned to the crew. |
-| **Agents** | A list of agents that are part of the crew. |
-| **Process** | The process flow (e.g., sequential, hierarchical) the crew follows. |
-| **Verbose** | The verbosity level for logging during execution. |
-| **Manager LLM** | The language model used by the manager agent in a hierarchical process. |
-| **Config** | Configuration settings for the crew. |
-| **Max RPM** | Maximum requests per minute the crew adheres to during execution. |
-| **Language** | Language setting for the crew's operation. |
+| Attribute | Description |
+| :--- | :--- |
+| **Tasks** | A list of tasks assigned to the crew. |
+| **Agents** | A list of agents that are part of the crew. |
+| **Process** *(optional)* | The process flow (e.g., sequential, hierarchical) the crew follows. |
+| **Verbose** *(optional)* | The verbosity level for logging during execution. |
+| **Manager LLM** *(optional)* | The language model used by the manager agent in a hierarchical process. **Required when using a hierarchical process.** |
+| **Function Calling LLM** *(optional)* | If passed, the crew will use this LLM to do function calling for tools for all agents in the crew. Each agent can have its own LLM, which overrides the crew's LLM for function calling. |
+| **Config** *(optional)* | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
+| **Max RPM** *(optional)* | Maximum requests per minute the crew adheres to during execution. |
+| **Language** *(optional)* | Language used for the crew, defaults to English. |
+| **Full Output** *(optional)* | Whether the crew should return the full output with all tasks' outputs or just the final output. |
+| **Step Callback** *(optional)* | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
+| **Share Crew** *(optional)* | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
 
 !!! note "Crew Max RPM"
-    The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents `max_rpm` settings if you set it.
+    The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it.
````
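
A short sketch of the crew-level `step_callback` and `max_rpm` attributes from the table above, using the agents and tasks defined in the example that follows:

```python
from crewai import Crew

def log_step(step):
    # Called after each step of every agent in the crew
    print(f"[crew step] {step}")

my_crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_article_task],
    max_rpm=20,             # overrides individual agents' max_rpm settings
    step_callback=log_step
)
```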

## Creating a Crew
````diff
@@ -43,26 +47,45 @@ researcher = Agent(
 
 writer = Agent(
   role='Content Writer',
-  goal='Write engaging articles on AI discoveries'
+  goal='Write engaging articles on AI discoveries',
+  verbose=True
 )
 
 # Create tasks for the agents
-research_task = Task(description='Identify breakthrough AI technologies', agent=researcher)
-write_article_task = Task(description='Draft an article on the latest AI technologies', agent=writer)
+research_task = Task(
+  description='Identify breakthrough AI technologies',
+  agent=researcher
+)
+write_article_task = Task(
+  description='Draft an article on the latest AI technologies',
+  agent=writer
+)
 
 # Assemble the crew with a sequential process
 my_crew = Crew(
   agents=[researcher, writer],
   tasks=[research_task, write_article_task],
   process=Process.sequential,
-  verbose=True
+  full_output=True,
+  verbose=True,
 )
 ```
 
+## Crew Usage Metrics
+
+After the crew execution, you can access the `usage_metrics` attribute to view the language model (LLM) usage metrics for all tasks executed by the crew. This provides insights into operational efficiency and areas for improvement.
+
+```python
+# Access the crew's usage metrics
+crew = Crew(agents=[agent1, agent2], tasks=[task1, task2])
+crew.kickoff()
+print(crew.usage_metrics)
+```
+
 ## Crew Execution Process
 
 - **Sequential Process**: Tasks are executed one after another, allowing for a linear flow of work.
-- **Hierarchical Process**: A manager agent coordinates the crew, delegating tasks and validating outcomes before proceeding.
+- **Hierarchical Process**: A manager agent coordinates the crew, delegating tasks and validating outcomes before proceeding. **Note**: A `manager_llm` is required for this process and is essential for validating the process flow.
 
 ### Kicking Off a Crew
 
@@ -72,4 +95,4 @@ Once your crew is assembled, initiate the workflow with the `kickoff()` method.
 # Start the crew's task execution
 result = my_crew.kickoff()
 print(result)
 ```
````

docs/core-concepts/Processes.md

````diff
@@ -1,48 +1,60 @@
 ---
 title: Managing Processes in CrewAI
-description: An overview of workflow management through processes in CrewAI.
+description: Detailed guide on workflow management through processes in CrewAI, with updated implementation details.
 ---
 
 ## Understanding Processes
 !!! note "Core Concept"
-    Processes in CrewAI orchestrate how tasks are executed by agents, akin to project management in human teams. They ensure tasks are distributed and completed efficiently, according to a predefined game plan.
+    In CrewAI, processes orchestrate the execution of tasks by agents, akin to project management in human teams. These processes ensure tasks are distributed and executed efficiently, in alignment with a predefined strategy.
 
 ## Process Implementations
 
-- **Sequential**: Executes tasks one after another, ensuring a linear and orderly progression.
-- **Hierarchical**: Implements a chain of command, where tasks are delegated and executed based on a managerial structure.
-- **Consensual (WIP)**: Future process type aiming for collaborative decision-making among agents on task execution.
+- **Sequential**: Executes tasks sequentially, ensuring tasks are completed in an orderly progression.
+- **Hierarchical**: Organizes tasks in a managerial hierarchy, where tasks are delegated and executed based on a structured chain of command. Note: a manager language model (`manager_llm`) must be specified in the crew to enable the hierarchical process, allowing for the creation and management of tasks by the manager.
+- **Consensual Process (Planned)**: Currently under consideration for future development, this process type aims for collaborative decision-making among agents on task execution, introducing a more democratic approach to task management within CrewAI. As of now, it is not implemented in the codebase.
 
 ## The Role of Processes in Teamwork
-Processes transform individual agents into a unified team, coordinating their efforts to achieve common goals with efficiency and harmony.
+Processes enable individual agents to operate as a cohesive unit, streamlining their efforts to achieve common objectives with efficiency and coherence.
 
 ## Assigning Processes to a Crew
-Specify the process during crew creation to determine the execution strategy:
+To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. Note: for a hierarchical process, be sure to define a `manager_llm` for the manager agent.
 
 ```python
 from crewai import Crew
 from crewai.process import Process
+from langchain_openai import ChatOpenAI
 
 # Example: Creating a crew with a sequential process
-crew = Crew(agents=my_agents, tasks=my_tasks, process=Process.sequential)
+crew = Crew(
+  agents=my_agents,
+  tasks=my_tasks,
+  process=Process.sequential
+)
 
 # Example: Creating a crew with a hierarchical process
-crew = Crew(agents=my_agents, tasks=my_tasks, process=Process.hierarchical)
+# Ensure to provide a manager_llm
+crew = Crew(
+  agents=my_agents,
+  tasks=my_tasks,
+  process=Process.hierarchical,
+  manager_llm=ChatOpenAI(model="gpt-4")
+)
 ```
+**Note:** Ensure `my_agents` and `my_tasks` are defined prior to creating a `Crew` object, and for the hierarchical process, `manager_llm` is also required.
 
 ## Sequential Process
-Ensures a natural flow of work, mirroring human team dynamics by progressing through tasks thoughtfully and systematically.
-
-Tasks need to be pre-assigned to agents, and the order of execution is determined by the order of the tasks in the list.
-
-Tasks are executed one after another, ensuring a linear and orderly progression, and the output of one task is automatically used as context for the next task.
-
-You can also define specific tasks' outputs that should be used as context for another task by using the `context` parameter in the `Task` class.
+This method mirrors dynamic team workflows, progressing through tasks in a thoughtful and systematic manner. Task execution follows the predefined order in the task list, with the output of one task serving as context for the next.
+
+To customize task context, utilize the `context` parameter in the `Task` class to specify outputs that should be used as context for subsequent tasks.
 
 ## Hierarchical Process
-Mimics a corporate hierarchy, where a manager oversees task execution, planning, delegation, and validation, enhancing task coordination.
-
-In this process tasks don't need to be pre-assigned to agents; the manager decides which agent performs each task, reviews the output, and decides whether the task is complete.
+Emulates a corporate hierarchy: crewAI automatically creates a manager for you, requiring the specification of a manager language model (`manager_llm`) for the manager agent. This agent oversees task execution, including planning, delegation, and validation. Tasks are not pre-assigned; the manager allocates tasks to agents based on their capabilities, reviews outputs, and assesses task completion.
 
 ## Process Class: Detailed Overview
 The `Process` class is implemented as an enumeration (`Enum`), ensuring type safety and restricting process values to the defined types (`sequential`, `hierarchical`, and future `consensual`). This design choice guarantees that only valid processes are utilized within the CrewAI framework.
````
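
A tiny illustration of that enum design, using the import path shown earlier on this page:

```python
from crewai.process import Process

# Only the defined members are valid process values
print(list(Process))            # the defined members, e.g. sequential and hierarchical
print(Process.sequential.name)  # "sequential"
```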
````diff
+## Planned Future Processes
+- **Consensual Process**: This collaborative decision-making process among agents on task execution is under consideration but not currently implemented. This future enhancement aims to introduce a more democratic approach to task management within CrewAI.
 
 ## Conclusion
-Processes are vital for structured collaboration within CrewAI, enabling agents to work together systematically. Future updates will introduce new processes, further mimicking the adaptability and complexity of human teamwork.
+The structured collaboration facilitated by processes within CrewAI is crucial for enabling systematic teamwork among agents. Documentation will be regularly updated to reflect new processes and enhancements, ensuring users have access to the most current and comprehensive information.
````

docs/core-concepts/Tasks.md

````diff
@@ -5,36 +5,39 @@ description: Overview and management of tasks within the crewAI framework.
 
 ## Overview of a Task
 !!! note "What is a Task?"
     In the CrewAI framework, tasks are individual assignments that agents complete. They encapsulate necessary information for execution, including a description, assigned agent, and required tools, offering flexibility for various action complexities.
 
 Tasks in CrewAI can be designed to require collaboration between agents. For example, one agent might gather data while another analyzes it. This collaborative approach can be defined within the task properties and managed by the Crew's process.
 
 ## Task Attributes
 
 | Attribute | Description |
 | :--- | :--- |
 | **Description** | A clear, concise statement of what the task entails. |
 | **Agent** | Optionally, you can specify which agent is responsible for the task. If not, the crew's process will determine who takes it on. |
-| **Expected Output** *(optional)* | Clear and detailed definition of expected output for the task. |
+| **Expected Output** | Clear and detailed definition of expected output for the task. |
 | **Tools** *(optional)* | These are the functions or capabilities the agent can utilize to perform the task. They can be anything from simple actions like 'search' to more complex interactions with other agents or APIs. |
-| **Async Execution** *(optional)* | If the task should be executed asynchronously. |
-| **Context** *(optional)* | Other tasks that will have their output used as context for this task; if one is an asynchronous task, it will wait for that task to finish. |
+| **Async Execution** *(optional)* | Indicates whether the task should be executed asynchronously, allowing the crew to continue with the next task without waiting for completion. |
+| **Context** *(optional)* | Other tasks whose output will be used as context for this task. If a task is asynchronous, the system will wait for it to finish before using its output as context. |
+| **Output JSON** *(optional)* | Takes a pydantic model and returns the output as a JSON object. **The agent's LLM needs to use an OpenAI client; it could be Ollama, for example, but via the OpenAI wrapper.** |
+| **Output Pydantic** *(optional)* | Takes a pydantic model and returns the output as a pydantic object. **The agent's LLM needs to use an OpenAI client; it could be Ollama, for example, but via the OpenAI wrapper.** |
+| **Output File** *(optional)* | Takes a file path and saves the output of the task to it. |
+| **Callback** *(optional)* | A function to be executed after the task is completed. |
 
 ## Creating a Task
 
-This is the simpliest example for creating a task, it involves defining its scope and agent, but there are optional attributes that can provide a lot of flexibility:
+This is the simplest example of creating a task; it involves defining its scope and agent, but there are optional attributes that can provide a lot of flexibility:
 
 ```python
 from crewai import Task
 
 task = Task(
   description='Find and summarize the latest and most relevant news on AI',
   agent=sales_agent
 )
 ```
 !!! note "Task Assignment"
     Tasks can be assigned directly by specifying an `agent` for them, or they can be assigned at run time through CrewAI's `hierarchical` process, considering roles, availability, or other criteria.
 
 ## Integrating Tools with Tasks
````
````diff
@@ -45,35 +48,33 @@ Tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and
 ```python
 import os
 os.environ["OPENAI_API_KEY"] = "Your Key"
+os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
 
 from crewai import Agent, Task, Crew
-from langchain.agents import Tool
-from langchain_community.tools import DuckDuckGoSearchRun
+from crewai_tools import SerperDevTool
 
 research_agent = Agent(
   role='Researcher',
   goal='Find and summarize the latest AI news',
   backstory="""You're a researcher at a large company.
   You're responsible for analyzing data and providing insights
   to the business.""",
   verbose=True
 )
 
-# Install duckduckgo-search for this example:
-# !pip install -U duckduckgo-search
-search_tool = DuckDuckGoSearchRun()
+search_tool = SerperDevTool()
 
 task = Task(
   description='Find and summarize the latest AI news',
   expected_output='A bullet list summary of the top 5 most important AI news',
   agent=research_agent,
   tools=[search_tool]
 )
 
 crew = Crew(
   agents=[research_agent],
   tasks=[task],
   verbose=2
 )
 
 result = crew.kickoff()
````
````diff
@@ -82,27 +83,36 @@ print(result)
 
 This demonstrates how tasks with specific tools can override an agent's default set for tailored task execution.
 
-## Refering other Tasks
+## Referring to Other Tasks
 
-In crewAI, the output of one task is automatically relayed into the next one, but you can specifically define what tasks' output should be used as context for another task.
+In crewAI, the output of one task is automatically relayed into the next one, but you can specifically define which tasks' outputs, including multiple, should be used as context for another task.
 
 This is useful when you have a task that depends on the output of another task that is not performed immediately after it. This is done through the `context` attribute of the task:
 
 ```python
 # ...
 
-research_task = Task(
+research_ai_task = Task(
   description='Find and summarize the latest AI news',
   expected_output='A bullet list summary of the top 5 most important AI news',
+  async_execution=True,
   agent=research_agent,
   tools=[search_tool]
 )
 
+research_ops_task = Task(
+  description='Find and summarize the latest AI Ops news',
+  expected_output='A bullet list summary of the top 5 most important AI Ops news',
+  async_execution=True,
+  agent=research_agent,
+  tools=[search_tool]
+)
+
 write_blog_task = Task(
-  description="Write a full blog post about the importante of AI and it's latest news",
+  description="Write a full blog post about the importance of AI and its latest news",
   expected_output='Full blog post that is 4 paragraphs long',
   agent=writer_agent,
-  context=[research_task]
+  context=[research_ai_task, research_ops_task]
 )
 
 #...
````
````diff
@@ -110,7 +120,7 @@ write_blog_task(
 
 ## Asynchronous Execution
 
-You can define a task to be executed asynchronously, this means that the crew will not wait for it to be completed to continue with the next task. This is useful for tasks that take a long time to be completed, or that are not crucial for the next tasks to be performed.
+You can define a task to be executed asynchronously. This means that the crew will not wait for it to be completed to continue with the next task. This is useful for tasks that take a long time to be completed, or that are not crucial for the next tasks to be performed.
 
 You can then use the `context` attribute to define in a future task that it should wait for the output of the asynchronous task to be completed.
 
@@ -118,7 +128,7 @@ You can then use the `context` attribute to define in a future task that it should
 #...
 
 list_ideas = Task(
-    description="List of 5 interesting ideas to explore for na article about AI.",
+    description="List of 5 interesting ideas to explore for an article about AI.",
     expected_output="Bullet point list of 5 ideas for an article.",
     agent=researcher,
     async_execution=True # Will be executed asynchronously
@@ -132,7 +142,7 @@ list_important_history = Task(
 )
 
 write_article = Task(
-    description="Write an article about AI, it's history and interesting ideas.",
+    description="Write an article about AI, its history, and interesting ideas.",
     expected_output="A 4 paragraph article about AI.",
     agent=writer,
     context=[list_ideas, list_important_history] # Will wait for the output of the two tasks to be completed
````
````diff
@@ -143,7 +153,7 @@ write_article = Task(
 
 ## Callback Mechanism
 
-You can define a callback function that will be executed after the task is completed. This is useful for tasks that need to trigger some side effect after they are completed, while the crew is still running.
+The callback function is executed after the task is completed, allowing for actions or notifications to be triggered based on the task's outcome.
 
 ```python
 # ...
@@ -154,7 +164,7 @@ def callback_function(output: TaskOutput):
     print(f"""
         Task completed!
        Task: {output.description}
-        Output: {output.result}
+        Output: {output.raw_output}
     """)
 
 research_task = Task(
````
````diff
@@ -168,7 +178,7 @@ research_task = Task(
 #...
 ```
 
-## Accessing a specific Task Output
+## Accessing a Specific Task Output
 
 Once a crew finishes running, you can access the output of a specific task by using the `output` attribute of the task object:
 
@@ -195,18 +205,23 @@ result = crew.kickoff()
 print(f"""
     Task completed!
     Task: {task1.output.description}
-    Output: {task1.output.result}
+    Output: {task1.output.raw_output}
 """)
 ```
 
 ## Tool Override Mechanism
 
 Specifying tools in a task allows for dynamic adaptation of agent capabilities, emphasizing CrewAI's flexibility.
 
+## Error Handling and Validation Mechanisms
+
+While creating and executing tasks, certain validation mechanisms are in place to ensure the robustness and reliability of task attributes. These include, but are not limited to:
+
+- Ensuring only one output type is set per task, to maintain clear output expectations.
+- Preventing manual assignment of the `id` attribute, to uphold the integrity of the unique identifier system.
+
+These validations help maintain the consistency and reliability of task executions within the crewAI framework.
````
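
A hedged sketch of how the first rule plays out, using the `output_json` and `output_pydantic` attributes from the table above (the exact error behavior is not specified here):

```python
from pydantic import BaseModel
from crewai import Task

class NewsSummary(BaseModel):
    headline: str
    bullets: list

# Valid: exactly one structured output type is set
summary_task = Task(
    description='Summarize the latest AI news',
    expected_output='A headline plus bullet points',
    agent=research_agent,
    output_pydantic=NewsSummary
)

# Invalid: setting both output types at once should fail validation
# Task(..., output_json=NewsSummary, output_pydantic=NewsSummary)
```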
````diff
 ## Conclusion
 
-Tasks are the driving force behind the actions of agents in crewAI. By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit.
-Equipping tasks with appropriate tools is crucial for maximizing CrewAI's potential, ensuring agents are effectively prepared for their assignments.
+Tasks are the driving force behind the actions of agents in crewAI. By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit. Equipping tasks with appropriate tools, understanding the execution process, and following robust validation practices are crucial for maximizing CrewAI's potential, ensuring agents are effectively prepared for their assignments and that tasks are executed as intended.
````

docs/core-concepts/Tools.md

````diff
@@ -1,65 +1,211 @@
 ---
 title: crewAI Tools
-description: Understanding and leveraging tools within the crewAI framework.
+description: Understanding and leveraging tools within the crewAI framework for agent collaboration and task execution.
 ---
 
+## Introduction
+CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers. This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.
+
 ## What is a Tool?
 !!! note "Definition"
-    A tool in CrewAI is a skill, something agents can use to perform tasks. Right now these can be tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools); they are basically functions that an agent can utilize for various actions, from simple searches to complex interactions with external systems.
+    A tool in CrewAI is a skill or function that agents can utilize to perform various actions. This includes tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools), enabling everything from simple searches to complex interactions and effective teamwork among agents.
 
 ## Key Characteristics of Tools
 
-- **Utility**: Designed for specific tasks such as web searching, data analysis, or content generation.
-- **Integration**: Enhance agent capabilities by integrating tools directly into their workflow.
-- **Customizability**: Offers the flexibility to develop custom tools or use existing ones from LangChain's ecosystem.
+- **Utility**: Crafted for tasks such as web searching, data analysis, content generation, and agent collaboration.
+- **Integration**: Boosts agent capabilities by seamlessly integrating tools into their workflow.
+- **Customizability**: Provides the flexibility to develop custom tools or utilize existing ones, catering to the specific needs of agents.
 
+## Using crewAI Tools
+
+To enhance your agents' capabilities with crewAI tools, begin by installing our extra tools package:
+
+```bash
+pip install 'crewai[tools]'
+```
+
+Here's an example demonstrating their use:
+
+```python
+import os
+from crewai import Agent, Task, Crew
+# Importing crewAI tools
+from crewai_tools import (
+    DirectoryReadTool,
+    FileReadTool,
+    SerperDevTool,
+    WebsiteSearchTool
+)
+
+# Set up API keys
+os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
+os.environ["OPENAI_API_KEY"] = "Your Key"
+
+# Instantiate tools
+docs_tool = DirectoryReadTool(directory='./blog-posts')
+file_tool = FileReadTool()
+search_tool = SerperDevTool()
+web_rag_tool = WebsiteSearchTool()
+
+# Create agents
+researcher = Agent(
+    role='Market Research Analyst',
+    goal='Provide up-to-date market analysis of the AI industry',
+    backstory='An expert analyst with a keen eye for market trends.',
+    tools=[search_tool, web_rag_tool],
+    verbose=True
+)
+
+writer = Agent(
+    role='Content Writer',
+    goal='Craft engaging blog posts about the AI industry',
+    backstory='A skilled writer with a passion for technology.',
+    tools=[docs_tool, file_tool],
+    verbose=True
+)
+
+# Define tasks
+research = Task(
+    description='Research the latest trends in the AI industry and provide a summary.',
+    expected_output='A summary of the top 3 trending developments in the AI industry with a unique perspective on their significance.',
+    agent=researcher
+)
+
+write = Task(
+    description='Write an engaging blog post about the AI industry, based on the research analyst’s summary. Draw inspiration from the latest blog posts in the directory.',
+    expected_output='A 4-paragraph blog post formatted in markdown with engaging, informative, and accessible content, avoiding complex jargon.',
+    agent=writer,
+    output_file='blog-posts/new_post.md'  # The final blog post will be saved here
+)
+
+# Assemble a crew
+crew = Crew(
+    agents=[researcher, writer],
+    tasks=[research, write],
+    verbose=2
+)
+
+# Execute tasks
+crew.kickoff()
+```
````
````diff
+## Available crewAI Tools
+
+Most of the tools in the crewAI toolkit let you either pin specific arguments or leave them more open-ended. For example:
+
+```python
+from crewai_tools import DirectoryReadTool
+
+# This allows the agent with this tool to read any directory during its execution
+tool = DirectoryReadTool()
+
+# OR
+
+# This allows the agent with this tool to read only the specified directory during its execution
+tool = DirectoryReadTool(directory='./directory')
+```
+
+Specific per-tool docs are coming soon. Here is a list of the available tools and their descriptions:
+
+| Tool | Description |
+| :--- | :--- |
+| **CodeDocsSearchTool** | A RAG tool optimized for searching through code documentation and related technical documents. |
+| **CSVSearchTool** | A RAG tool designed for searching within CSV files, tailored to handle structured data. |
+| **DirectorySearchTool** | A RAG tool for searching within directories, useful for navigating through file systems. |
+| **DOCXSearchTool** | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files. |
+| **DirectoryReadTool** | Facilitates reading and processing of directory structures and their contents. |
+| **FileReadTool** | Enables reading and extracting data from files, supporting various file formats. |
+| **GithubSearchTool** | A RAG tool for searching within GitHub repositories, useful for code and documentation search. |
+| **SerperDevTool** | A specialized tool for development purposes, with specific functionalities under development. |
+| **TXTSearchTool** | A RAG tool focused on searching within text (.txt) files, suitable for unstructured data. |
+| **JSONSearchTool** | A RAG tool designed for searching within JSON files, catering to structured data handling. |
+| **MDXSearchTool** | A RAG tool tailored for searching within Markdown (MDX) files, useful for documentation. |
+| **PDFSearchTool** | A RAG tool aimed at searching within PDF documents, ideal for processing scanned documents. |
+| **PGSearchTool** | A RAG tool optimized for searching within PostgreSQL databases, suitable for database queries. |
+| **RagTool** | A general-purpose RAG tool capable of handling various data sources and types. |
+| **ScrapeElementFromWebsiteTool** | Enables scraping specific elements from websites, useful for targeted data extraction. |
+| **ScrapeWebsiteTool** | Facilitates scraping entire websites, ideal for comprehensive data collection. |
+| **WebsiteSearchTool** | A RAG tool for searching website content, optimized for web data extraction. |
+| **XMLSearchTool** | A RAG tool designed for searching within XML files, suitable for structured data formats. |
+| **YoutubeChannelSearchTool** | A RAG tool for searching within YouTube channels, useful for video content analysis. |
+| **YoutubeVideoSearchTool** | A RAG tool aimed at searching within YouTube videos, ideal for video data extraction. |
````
## Creating your own Tools
|
||||
!!! example "Custom Tool Creation"
|
||||
Developers can craft custom tools tailored for their agent’s needs or utilize pre-built options. Here’s how to create one:
|
||||
Developers can craft custom tools tailored for their agent’s needs or utilize pre-built options:
|
||||
|
||||
To create your own crewAI tools you will need to install our extra tools package:
|
||||
|
||||
```bash
|
||||
pip install 'crewai[tools]'
|
||||
```
|
||||
|
||||
Once you do that there are two main ways for one to create a crewAI tool:
|
||||
|
||||
### Subclassing `BaseTool`
|
||||
|
||||
```python
|
||||
from crewai_tools import BaseTool
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
return "Result from custom tool"
|
||||
```
|
||||
|
||||
Define a new class inheriting from `BaseTool`, specifying `name`, `description`, and the `_run` method for operational logic.
|
||||
|
||||
|
||||
### Utilizing the `tool` Decorator

For a simpler approach, create a `Tool` object directly with the required attributes and the functional logic.

```python
from crewai_tools import tool

@tool("Name of my tool")
def my_tool(question: str) -> str:
    """Clear description for what this tool is useful for, your agent will need this information to use it."""
    # Function logic here
    return "Result from my tool"
```

```python
import json
import os

import requests
from crewai import Agent
from crewai.tools import tool
from unstructured.partition.html import partition_html


# Annotate the function with the tool decorator from crewAI
@tool("Scrape website content")
def scrape_website(website: str) -> str:
    """Scrapes a website's content and returns the first 5000 characters."""
    url = f"https://chrome.browserless.io/content?token={os.environ['BROWSERLESS_API_KEY']}"
    payload = json.dumps({"url": website})
    headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
    response = requests.request("POST", url, headers=headers, data=payload)
    elements = partition_html(text=response.text)
    content = "\n\n".join([str(el) for el in elements])
    return content[:5000]

# Annotate the function with the tool decorator from crewAI
@tool("Integration with a given API")
def integration_tool(argument: str) -> str:
    """Integration with a given API"""
    results = "..."  # Call the API here
    return results  # string to be sent back to the agent

# Assign the tools to an agent
agent = Agent(
    role='Research Analyst',
    goal='Provide up-to-date market analysis',
    backstory='An expert analyst with a keen eye for market trends.',
    tools=[scrape_website, integration_tool]
)
```

## Using LangChain Tools
!!! info "LangChain Integration"
    CrewAI seamlessly integrates with LangChain's comprehensive toolkit for search-based queries and more. Assigning an existing LangChain tool to an agent is straightforward; the full list of built-in tools is available in the [LangChain Toolkit](https://python.langchain.com/docs/integrations/tools/):

```python
import os

from crewai import Agent
from langchain.agents import Tool
from langchain.utilities import GoogleSerperAPIWrapper

# Setup API keys
os.environ["OPENAI_API_KEY"] = "Your Key"
os.environ["SERPER_API_KEY"] = "Your Key"

search = GoogleSerperAPIWrapper()

# Wrap the search utility as a LangChain Tool
serper_tool = Tool(
    name="Intermediate Answer",
    func=search.run,
    description="Useful for search-based queries"
)

agent = Agent(
    role='Research Analyst',
    goal='Provide up-to-date market analysis',
    backstory='An expert analyst with a keen eye for market trends.',
    tools=[serper_tool]
)

# rest of the code ...
```

## Conclusion
Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively. When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem.

docs/how-to/Create-Custom-Tools.md (new file)

---
title: Creating your own Tools
description: Guide on how to create and use custom tools within the crewAI framework.
---

## Creating your own Tools
!!! example "Custom Tool Creation"
    Developers can craft custom tools tailored for their agent's needs or utilize pre-built options.

To create your own crewAI tools you will need to install our extra tools package:

```bash
pip install 'crewai[tools]'
```

Once you do that, there are two main ways to create a crewAI tool:

### Subclassing `BaseTool`

```python
from crewai_tools import BaseTool

class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."

    def _run(self, argument: str) -> str:
        # Implementation goes here
        return "Result from custom tool"
```

Define a new class inheriting from `BaseTool`, specifying `name`, `description`, and the `_run` method for operational logic.

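For instance, once defined, the custom tool can be handed to an agent like any built-in one (a minimal sketch; the agent fields here are placeholder values):

```python
from crewai import Agent

# The agent discovers the tool through its name and description
analyst = Agent(
    role='Analyst',
    goal='Answer questions using the custom tool',
    backstory='A diligent analyst.',
    tools=[MyCustomTool()]
)
```
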
### Utilizing the `tool` Decorator

For a simpler approach, create a `Tool` object directly with the required attributes and the functional logic.

```python
from crewai_tools import tool

@tool("Name of my tool")
def my_tool(question: str) -> str:
    """Clear description for what this tool is useful for, your agent will need this information to use it."""
    # Function logic here
    return "Result from my tool"
```

```python
from crewai import Agent
from crewai.tools import tool

# Annotate the function with the tool decorator from crewAI
@tool("Integration with a given API")
def integration_tool(argument: str) -> str:
    """Integration with a given API"""
    results = "..."  # Call the API here
    return results  # string to be sent back to the agent

# Assign the integration tool to an agent
agent = Agent(
    role='Research Analyst',
    goal='Provide up-to-date market analysis',
    backstory='An expert analyst with a keen eye for market trends.',
    tools=[integration_tool]
)
```

### Using the `Tool` function from LangChain

For another simple approach, define a plain Python function with the required logic.

```python
def combine(a, b):
    return a + b
```

Then wrap that function in a tool by passing it to the `func` parameter of LangChain's `Tool`:

```python
from langchain.agents import Tool

math_tool = Tool(
    name="Math tool",
    func=combine,
    description="Useful for adding two numbers together, in other words combining them."
)
```

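As with any other tool, the wrapped function can then be assigned to an agent (a minimal sketch; the agent fields are placeholder values):

```python
from crewai import Agent

calculator = Agent(
    role='Calculator',
    goal='Perform simple arithmetic on request',
    backstory='A precise assistant for number crunching.',
    tools=[math_tool]
)
```
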
---
title: Assembling and Activating Your CrewAI Team
description: A comprehensive guide to creating a dynamic CrewAI team for your projects, with updated functionalities including verbose mode, memory capabilities, and more.
---

## Introduction
Embark on your CrewAI journey by setting up your environment and initiating your AI crew with enhanced features. This guide ensures a seamless start, incorporating the latest updates.

## Step 0: Installation
Install CrewAI and any necessary packages for your project.

```shell
pip install crewai
pip install 'crewai[tools]'
```

## Step 1: Assemble Your Agents
Define your agents with distinct roles, backstories, and now, enhanced capabilities such as verbose mode and memory usage. These elements add depth and guide their task execution and interaction within the crew.

```python
import os
os.environ["SERPER_API_KEY"] = "Your Key"  # serper.dev API key
os.environ["OPENAI_API_KEY"] = "Your Key"

from crewai import Agent
from crewai_tools import SerperDevTool
search_tool = SerperDevTool()

# Creating a senior researcher agent with memory and verbose mode
researcher = Agent(
  role='Senior Researcher',
  goal='Uncover groundbreaking technologies in {topic}',
  verbose=True,
  memory=True,
  backstory=(
    "Driven by curiosity, you're at the forefront of "
    "innovation, eager to explore and share knowledge that could change "
    "the world."
  ),
  tools=[search_tool],
  allow_delegation=True
)

# Creating a writer agent with custom tools and delegation capability
writer = Agent(
  role='Writer',
  goal='Narrate compelling tech stories about {topic}',
  verbose=True,
  memory=True,
  backstory=(
    "With a flair for simplifying complex topics, you craft "
    "engaging narratives that captivate and educate, bringing new "
    "discoveries to light in an accessible manner."
  ),
  tools=[search_tool],
  allow_delegation=False
)
```

## Step 2: Define the Tasks
Detail the specific objectives for your agents, including new features for asynchronous execution and output customization. These tasks ensure a targeted approach to their roles.

```python
from crewai import Task

# Research task
research_task = Task(
  description=(
    "Identify the next big trend in {topic}. "
    "Focus on identifying pros and cons and the overall narrative. "
    "Your final report should clearly articulate the key points, "
    "its market opportunities, and potential risks."
  ),
  expected_output='A comprehensive 3 paragraphs long report on the latest AI trends.',
  tools=[search_tool],
  agent=researcher,
)

# Writing task with language model configuration
write_task = Task(
  description=(
    "Compose an insightful article on {topic}. "
    "Focus on the latest trends and how it's impacting the industry. "
    "This article should be easy to understand, engaging, and positive."
  ),
  expected_output='A 4 paragraph article on {topic} advancements formatted as markdown.',
  tools=[search_tool],
  agent=writer,
  async_execution=False,
  output_file='new-blog-post.md'  # Example of output customization
)
```

## Step 3: Form the Crew
Combine your agents into a crew, setting the workflow process they'll follow to accomplish the tasks, now with the option to configure language models for enhanced interaction.

```python
from crewai import Crew, Process

# Forming the tech-focused crew with enhanced configurations
crew = Crew(
  agents=[researcher, writer],
  tasks=[research_task, write_task],
  process=Process.sequential  # Optional: Sequential task execution is default
)
```

## Step 4: Kick It Off
Initiate the process with your enhanced crew ready. Observe as your agents collaborate, leveraging their new capabilities for a successful project outcome. You can also pass the inputs that will be interpolated into the agents and tasks.

```python
# Starting the task execution process with enhanced feedback
result = crew.kickoff(inputs={'topic': 'AI in healthcare'})
print(result)
```

## Conclusion
Building and activating a crew in CrewAI has evolved with new functionalities. By incorporating verbose mode, memory capabilities, asynchronous task execution, output customization, and language model configuration, your AI team is more equipped than ever to tackle challenges efficiently. The depth of agent backstories and the precision of their objectives enrich collaboration, leading to successful project outcomes.

---
title: Customizing Agents in CrewAI
description: A comprehensive guide to tailoring agents for specific roles, tasks, and advanced customizations within the CrewAI framework.
---

## Customizable Attributes
Crafting an efficient CrewAI team hinges on the ability to tailor your AI agents dynamically to meet the unique requirements of any project. This section covers the foundational attributes you can customize.

### Key Attributes for Customization
- **Role**: Specifies the agent's job within the crew, such as 'Analyst' or 'Customer Service Rep'.
- **Goal**: Defines what the agent aims to achieve, in alignment with its role and the overarching objectives of the crew.
- **Backstory**: Provides depth to the agent's persona, enriching its motivations and engagements within the crew.
- **Tools**: Represents the capabilities or methods the agent uses to perform tasks, from simple functions to intricate integrations.

## Advanced Customization Options
Beyond the basic attributes, CrewAI allows for deeper customization to enhance an agent's behavior and capabilities significantly.

### Language Model Customization
Agents can be customized with specific language models (`llm`) and function-calling language models (`function_calling_llm`), offering advanced control over their processing and decision-making abilities. By default, crewAI agents are ReAct agents, but by setting the `function_calling_llm` you can turn them into function-calling agents.

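As a minimal sketch of what this looks like in practice (assuming the `langchain_openai` wrapper and placeholder model names; adjust to the models you actually use):

```python
from crewai import Agent
from langchain_openai import ChatOpenAI

agent = Agent(
    role='Research Analyst',
    goal='Provide up-to-date market analysis',
    backstory='An expert analyst with a keen eye for market trends.',
    llm=ChatOpenAI(model="gpt-4"),  # Drives the agent's reasoning
    function_calling_llm=ChatOpenAI(model="gpt-3.5-turbo")  # Used for tool/function calling
)
```
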
### Enabling Memory for Agents
CrewAI supports memory for agents, enabling them to remember past interactions. This feature is critical for tasks requiring awareness of previous contexts or decisions.

## Performance and Debugging Settings
Adjusting an agent's performance and monitoring its operations are crucial for efficient task execution.

### Verbose Mode and RPM Limit
- **Verbose Mode**: Enables detailed logging of an agent's actions, useful for debugging and optimization. Specifically, it provides insights into agent execution processes, aiding in the optimization of performance.
- **RPM Limit**: Sets the maximum number of requests per minute (`max_rpm`), controlling the agent's query frequency to external services.

### Maximum Iterations for Task Execution
The `max_iter` attribute allows users to define the maximum number of iterations an agent can perform for a single task, preventing infinite loops or excessively long executions. The default value is set to 15, providing a balance between thoroughness and efficiency. Once the agent approaches this number, it will try its best to give a good answer.

## Customizing Agents and Tools
Agents are customized by defining their attributes and tools during initialization. Tools are critical for an agent's functionality, enabling them to perform specialized tasks. In this example we will use the crewAI tools package to create a tool for a research analyst agent.

```shell
pip install 'crewai[tools]'
```

### Example: Assigning Tools to an Agent
```python
import os
from crewai import Agent
from crewai_tools import SerperDevTool

# Set API keys for tool initialization
os.environ["OPENAI_API_KEY"] = "Your Key"
os.environ["SERPER_API_KEY"] = "Your Key"

# Initialize a search tool
search_tool = SerperDevTool()

# Initialize the agent with advanced options
agent = Agent(
  role='Research Analyst',
  goal='Provide up-to-date market analysis',
  backstory='An expert analyst with a keen eye for market trends.',
  tools=[search_tool],
  memory=True,
  verbose=True,
  max_rpm=10,  # Optional: Limit requests to 10 per minute, preventing API abuse
  max_iter=5,  # Optional: Limit task iterations to 5 before the agent tries to give its best answer
  allow_delegation=False
)
```

## Delegation and Autonomy
Controlling an agent's ability to delegate tasks or ask questions is vital for tailoring its autonomy and collaborative dynamics within the crewAI framework. By default, the `allow_delegation` attribute is set to `True`, enabling agents to seek assistance or delegate tasks as needed. This default behavior promotes collaborative problem-solving and efficiency within the crewAI ecosystem.

### Example: Disabling Delegation for an Agent
```python
agent = Agent(
  role='Content Writer',
  goal='Write engaging content on market trends',
  backstory='A seasoned writer with expertise in market analysis.',
  allow_delegation=False  # Example values; the key setting is allow_delegation=False
)
```

## Conclusion
Customizing agents in CrewAI by setting their roles, goals, backstories, and tools, alongside advanced options like language model customization, memory, performance settings, and delegation preferences, equips a nuanced and capable AI team ready for complex challenges.

---
title: Implementing the Hierarchical Process in CrewAI
description: A comprehensive guide to understanding and applying the hierarchical process within your CrewAI projects, updated to reflect the latest coding practices and functionalities.
---

## Introduction
The hierarchical process in CrewAI introduces a structured approach to task management, simulating traditional organizational hierarchies for efficient task delegation and execution. This systematic workflow enhances project outcomes by ensuring tasks are handled with optimal efficiency and accuracy.

!!! note "Complexity and Efficiency"
    The hierarchical process is designed to leverage advanced models like GPT-4, optimizing token usage while handling complex tasks with greater efficiency.

## Hierarchical Process Overview
By default, tasks in CrewAI are managed through a sequential process. However, adopting a hierarchical approach allows for a clear hierarchy in task management, where a 'manager' agent coordinates the workflow, delegates tasks, and validates outcomes for streamlined and effective execution. This manager agent is automatically created by crewAI, so you don't need to worry about it.

### Key Features
- **Task Delegation**: A manager agent allocates tasks among crew members based on their roles and capabilities.
- **Result Validation**: The manager evaluates outcomes to ensure they meet the required standards.
- **Efficient Workflow**: Emulates corporate structures, providing an organized approach to task management.

## Implementing the Hierarchical Process
To utilize the hierarchical process, it's essential to explicitly set the process attribute to `Process.hierarchical`, as the default behavior is `Process.sequential`. Define a crew with a designated manager and establish a clear chain of command.

!!! note "Tools and Agent Assignment"
    Assign tools at the agent level to facilitate task delegation and execution by the designated agents under the manager's guidance.

!!! note "Manager LLM Requirement"
    Configuring the `manager_llm` parameter is crucial for the hierarchical process. The system requires a manager LLM to be set up for proper function, ensuring tailored decision-making.

```python
from langchain_openai import ChatOpenAI
from crewai import Crew, Process, Agent

# Define your agents; there is no need to define a manager
# Agents are defined with an optional tools parameter
researcher = Agent(
  role='Researcher',
  goal='Conduct in-depth analysis',
  # tools=[]  # This can be optionally specified; defaults to an empty list
)
writer = Agent(
  role='Writer',
  goal='Create engaging content',
  # tools=[]  # Optionally specify tools; defaults to an empty list
)

# Establishing the crew with a hierarchical process
project_crew = Crew(
  tasks=[...],  # Tasks to be delegated and executed under the manager's supervision
  agents=[researcher, writer],
  manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),  # Mandatory for hierarchical process
  process=Process.hierarchical  # Specifies the hierarchical management approach
)
```

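Once configured, the hierarchical crew is kicked off like any other crew (a minimal sketch; the manager agent handles delegation internally):

```python
result = project_crew.kickoff()
print(result)
```
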
### Workflow in Action
1. **Task Assignment**: The manager assigns tasks strategically, considering each agent's capabilities.
2. **Execution and Review**: Agents complete their tasks, with the manager ensuring quality standards.
3. **Sequential Task Progression**: Despite being a hierarchical process, tasks follow a logical order for smooth progression, facilitated by the manager's oversight.

## Conclusion
Adopting the hierarchical process in crewAI, with the correct configurations and understanding of the system's capabilities, facilitates an organized and efficient approach to project management.

---
title: Human Input on Execution
description: Comprehensive guide on integrating CrewAI with human input during execution, for complex decision-making processes or when agents need help during complex tasks.
---

# Human Input in Agent Execution

Human input plays a pivotal role in several agent execution scenarios, enabling agents to seek additional information or clarification when necessary. This capability is invaluable in complex decision-making processes or when agents need more details to complete a task effectively.

## Using Human Input with CrewAI

Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. The example below loads LangChain's built-in `human` tool (see the [LangChain human tool integration](https://python.langchain.com/docs/integrations/tools/human_tools)) and passes it to an agent alongside a search tool.

### Example:

```shell
pip install crewai
pip install 'crewai[tools]'
```

```python
import os
from crewai import Agent, Task, Crew
from crewai_tools import SerperDevTool

from langchain.agents import load_tools

os.environ["SERPER_API_KEY"] = "Your Key"  # serper.dev API key
os.environ["OPENAI_API_KEY"] = "Your Key"

# Loading Human Tools
human_tools = load_tools(["human"])

search_tool = SerperDevTool()

# Define your agents with roles, goals, and tools
researcher = Agent(
  role='Senior Research Analyst',
  goal='Uncover cutting-edge developments in AI and data science',
  backstory=(
    "You are a Senior Research Analyst at a leading tech think tank. "
    "Your expertise lies in identifying emerging trends and technologies in AI and data science. "
    "You have a knack for dissecting complex data and presenting actionable insights."
  ),
  verbose=True,
  allow_delegation=False,
  tools=[search_tool]+human_tools  # Passing human tools to the agent
)
writer = Agent(
  role='Tech Content Strategist',
  goal='Craft compelling content on tech advancements',
  backstory=(
    "You are a renowned Tech Content Strategist, known for your insightful and engaging articles on technology and innovation. "
    "With a deep understanding of the tech industry, you transform complex concepts into compelling narratives."
  ),
  verbose=True,
  allow_delegation=True
)

# Create tasks for your agents
# Being explicit on the task to ask for human feedback.
task1 = Task(
  description=(
    "Conduct a comprehensive analysis of the latest advancements in AI in 2024. "
    "Identify key trends, breakthrough technologies, and potential industry impacts. "
    "Compile your findings in a detailed report. "
    "Make sure to check with a human if the draft is good before finalizing your answer."
  ),
  expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
  agent=researcher,
)

task2 = Task(
  description=(
    "Using the insights from the researcher's report, develop an engaging blog post that highlights the most significant AI advancements. "
    "Your post should be informative yet accessible, catering to a tech-savvy audience. "
    "Aim for a narrative that captures the essence of these breakthroughs and their implications for the future."
  ),
  expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2024',
  agent=writer
)

# Instantiate your crew (reconstructed from the surrounding context)
crew = Crew(
  agents=[researcher, writer],
  tasks=[task1, task2]
)

# Get your crew to work!
result = crew.kickoff()

print("######################")
print(result)
```

---
title: Connect CrewAI to LLMs
description: Comprehensive guide on integrating CrewAI with various Large Language Models (LLMs), including detailed class attributes and methods.
---

## Connect CrewAI to LLMs
!!! note "Default LLM"
    By default, CrewAI uses OpenAI's GPT-4 model for language processing. However, you can configure your agents to use a different model or API. This guide will show you how to connect your agents to different LLMs through environment variables and direct instantiation.

CrewAI offers flexibility in connecting to various LLMs, including local models via [Ollama](https://ollama.ai) and different APIs like Azure. It's compatible with all [LangChain LLM](https://python.langchain.com/docs/integrations/llms/) components, enabling diverse integrations for tailored AI solutions.

## CrewAI Agent Overview
The `Agent` class is the cornerstone for implementing AI solutions in CrewAI. Here's an updated overview reflecting the latest codebase changes:

- **Attributes**:
    - `role`: Defines the agent's role within the solution.
    - `goal`: Specifies the agent's objective.
    - `backstory`: Provides a background story to the agent.
    - `llm`: Indicates the Large Language Model the agent uses.
    - `function_calling_llm` *Optional*: Will turn the ReAct crewAI agent into a function-calling agent.
    - `max_iter`: Maximum number of iterations for an agent to execute a task, default is 15.
    - `memory`: Enables the agent to retain information during the execution.
    - `max_rpm`: Sets the maximum number of requests per minute.
    - `verbose`: Enables detailed logging of the agent's execution.
    - `allow_delegation`: Allows the agent to delegate tasks to other agents, default is `True`.
    - `tools`: Specifies the tools available to the agent for task execution.
    - `step_callback`: Provides a callback function to be executed after each step.

```python
import os

# Required
os.environ["OPENAI_MODEL_NAME"] = "gpt-4-0125-preview"

# Agent will automatically use the model defined in the environment variable
example_agent = Agent(
  role='Local Expert',
  goal='Provide insights about the city',
  backstory="A knowledgeable local guide.",
  verbose=True
)
```

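The `step_callback` attribute from the list above can be wired in as follows (a minimal sketch; the print-based callback is purely illustrative):

```python
def log_step(step_output):
    # Called after each agent step; inspect or persist intermediate output here
    print("Agent step:", step_output)

monitored_agent = Agent(
    role='Local Expert',
    goal='Provide insights about the city',
    backstory='A knowledgeable local guide.',
    step_callback=log_step,
    verbose=True
)
```
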
## Ollama Integration
Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, set the appropriate environment variables as shown below. Note: detailed Ollama setup is beyond this document's scope, but general guidance is provided.

### Setting Up Ollama
- **Environment Variables Configuration**: To integrate Ollama, set the following environment variables:
```sh
OPENAI_API_BASE='http://localhost:11434/v1'
OPENAI_MODEL_NAME='openhermes'  # Adjust based on available model
OPENAI_API_KEY=''
```

## OpenAI Compatible API Endpoints
Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, and Mistral AI.

### Configuration Examples

#### FastChat
```sh
# Required
OPENAI_API_BASE="http://localhost:8001/v1"
OPENAI_MODEL_NAME='oh-2.5m7b-q51'  # Depending on the model you have available
OPENAI_API_KEY=NA
```

#### LM Studio
```sh
# Required
OPENAI_API_BASE="http://localhost:8000/v1"
OPENAI_MODEL_NAME=NA
OPENAI_API_KEY=NA
```

#### Mistral API
```sh
OPENAI_API_KEY=your-mistral-api-key
OPENAI_API_BASE=https://api.mistral.ai/v1
OPENAI_MODEL_NAME="mistral-small"  # Check documentation for available models
```

#### text-gen-web-ui
```sh
# Required
API_BASE_URL=http://localhost:5000
OPENAI_API_KEY=NA
MODEL_NAME=NA
```

### Azure Open AI Configuration
Azure's OpenAI API needs a distinct setup, utilizing the `langchain_openai` component for Azure-specific configurations. For Azure OpenAI API integration, set the following environment variables:
```sh
AZURE_OPENAI_VERSION="2022-12-01"
AZURE_OPENAI_DEPLOYMENT=""
AZURE_OPENAI_ENDPOINT=""
AZURE_OPENAI_KEY=""
```

### Example Agent with Azure LLM
```python
import os

from dotenv import load_dotenv
from crewai import Agent
from langchain_openai import AzureChatOpenAI

load_dotenv()

azure_llm = AzureChatOpenAI(
    azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
    api_key=os.environ.get("AZURE_OPENAI_KEY")
)

azure_agent = Agent(
    role='Example Agent',
    goal='Demonstrate custom LLM configuration',
    backstory='A diligent explorer of GitHub docs.',
    llm=azure_llm
)
```

---
title: Using the Sequential Process in crewAI
description: A comprehensive guide to utilizing the sequential process for task execution in crewAI projects.
---

## Introduction
CrewAI offers a flexible framework for executing tasks in a structured manner, supporting both sequential and hierarchical processes. This guide outlines how to effectively implement these processes to ensure efficient task execution and project completion.

## Sequential Process Overview
The sequential process ensures tasks are executed one after the other, following a linear progression. This approach is ideal for projects requiring tasks to be completed in a specific order.

### Key Features
- **Linear Task Flow**: Ensures orderly progression by handling tasks in a predetermined sequence.
- **Simplicity**: Best suited for projects with clear, step-by-step tasks.
- **Easy Monitoring**: Facilitates easy tracking of task completion and project progress.

## Implementing the Sequential Process
Assemble your crew and define tasks in the order they need to be executed.

```python
from crewai import Crew, Process, Agent, Task

# Define your agents
researcher = Agent(
  role='Researcher',
  goal='Conduct foundational research',
  backstory='An experienced researcher with a passion for uncovering insights'
)
analyst = Agent(
  role='Data Analyst',
  goal='Analyze research findings',
  backstory='A meticulous analyst with a knack for uncovering patterns'
)
writer = Agent(
  role='Writer',
  goal='Draft the final report',
  backstory='A skilled writer with a talent for crafting compelling narratives'
)

# Define the tasks in sequence
research_task = Task(description='Gather relevant data...', agent=researcher)
analysis_task = Task(description='Analyze the data...', agent=analyst)
writing_task = Task(description='Compose the report...', agent=writer)

# Form the crew with a sequential process
report_crew = Crew(
  agents=[researcher, analyst, writer],
  tasks=[research_task, analysis_task, writing_task],
  process=Process.sequential
)
```

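With the crew assembled, execution is started the same way as in the other guides (sketch):

```python
# Execute the tasks one after the other and collect the final output
result = report_crew.kickoff()
print(result)
```
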
### Workflow in Action
1. **Initial Task**: In a sequential process, the first agent completes their task and signals completion.
2. **Subsequent Tasks**: Agents pick up their tasks based on the process type, with outcomes of preceding tasks or manager directives guiding their execution.
3. **Completion**: The process concludes once the final task is executed, leading to project completion.

## Conclusion
The sequential process in CrewAI provides a clear, straightforward path for task execution. It's particularly suited for projects requiring a logical progression of tasks, ensuring each step is completed before the next begins, thereby facilitating a cohesive final product.

  </a>
</li>
<li>
  <a href="./how-to/Create-Custom-Tools">
    Create Custom Tools
  </a>
</li>
<li>
  <a href="./how-to/Sequential">
    Using Sequential Process
  </a>
</li>

---
title: Telemetry
description: Understanding the telemetry data collected by CrewAI and how it contributes to the enhancement of the library.
---
## Telemetry

CrewAI utilizes anonymous telemetry to gather usage statistics with the primary goal of enhancing the library. Our focus is on improving and developing the features, integrations, and tools most utilized by our users.

It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy.

### Data Collected Includes:
- **Version of CrewAI**: Assessing the adoption rate of our latest version helps us understand user needs and guide our updates.
- **Python Version**: Identifying the Python versions our users operate with assists in prioritizing our support efforts for these versions.
- **General OS Information**: Details like the number of CPUs and the operating system type (macOS, Windows, Linux) enable us to focus our development on the most used operating systems and explore the potential for OS-specific features.
- **Number of Agents and Tasks in a Crew**: Ensures our internal testing mirrors real-world scenarios, helping us guide users towards best practices.
- **Crew Process Utilization**: Understanding how crews are utilized aids in directing our development focus.
- **Memory and Delegation Use by Agents**: Insights into how these features are used help evaluate their effectiveness and future.
- **Task Execution Mode**: Knowing whether tasks are executed in parallel or sequentially influences our emphasis on enhancing parallel execution capabilities.
- **Language Model Utilization**: Supports our goal to improve support for the most popular models among our users.
- **Roles of Agents within a Crew**: Understanding the various roles agents play aids in crafting better tools, integrations, and examples.
- **Tool Usage**: Identifying which tools are most frequently used allows us to prioritize improvements in those areas.

### Opt-In Further Telemetry Sharing
Users can choose to share their complete telemetry data by setting the `share_crew` attribute to `True` in their crew configurations. This opt-in approach respects user privacy and aligns with data protection standards by ensuring users have control over their data sharing preferences. Enabling `share_crew` results in the collection of detailed `crew` and `task` execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.

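For illustration, a minimal sketch of opting in on a crew (assuming agents and tasks you have already defined):

```python
from crewai import Crew

crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    share_crew=True  # Opt in to sharing full telemetry data
)
```
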
### Updates and Revisions
We are committed to maintaining the accuracy and transparency of our documentation. Regular reviews and updates are performed to ensure our documentation accurately reflects the latest developments of our codebase and telemetry practices. Users are encouraged to review this section for the most current information on our data collection practices and how they contribute to the improvement of CrewAI.

docs/tools/CSVSearchTool.md (new file)

# CSVSearchTool

!!! note "Dependency on OpenAI"
    All RAG tools at the moment can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description

This tool is used to perform a RAG (Retrieval-Augmented Generation) search within a CSV file's content. It allows users to semantically search for queries in the content of a specified CSV file. This feature is particularly useful for extracting information from large CSV datasets where traditional search methods might be inefficient. All tools with "Search" in their name, including CSVSearchTool, are RAG tools designed for searching different sources of data.

## Installation

Install the crewai_tools package:

```shell
pip install 'crewai[tools]'
```

## Example

```python
from crewai_tools import CSVSearchTool

# Initialize the tool with a specific CSV file. This setup allows the agent to only search the given CSV file.
tool = CSVSearchTool(csv='path/to/your/csvfile.csv')

# OR

# Initialize the tool without a specific CSV file. The agent will need to provide the CSV path at runtime.
tool = CSVSearchTool()
```

## Arguments

- `csv`: The path to the CSV file you want to search. This is a mandatory argument if the tool was initialized without a specific CSV file; otherwise, it is optional.

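Like any other tool, the initialized `CSVSearchTool` can then be passed to an agent (a minimal sketch; the agent fields are placeholder values):

```python
from crewai import Agent
from crewai_tools import CSVSearchTool

csv_tool = CSVSearchTool(csv='path/to/your/csvfile.csv')

data_analyst = Agent(
    role='Data Analyst',
    goal='Answer questions about the CSV dataset',
    backstory='An analyst who lives in spreadsheets.',
    tools=[csv_tool]
)
```
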
docs/tools/CodeDocsSearchTool.md (new file, empty)

docs/tools/DOCXSearchTool.md (new file)

# DOCXSearchTool

!!! note "Dependency on OpenAI"
    All RAG tools at the moment can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The DOCXSearchTool is a RAG tool designed for semantic searching within DOCX documents. It enables users to effectively search and extract relevant information from DOCX files using query-based searches. This tool is invaluable for data analysis, information management, and research tasks, streamlining the process of finding specific information within large document collections.

## Installation
Install the crewai_tools package by running the following command in your terminal:

```shell
pip install 'crewai[tools]'
```

## Example
The following example demonstrates initializing the DOCXSearchTool to search within any DOCX file's content or with a specific DOCX file path.

```python
from crewai_tools import DOCXSearchTool

# Initialize the tool to search within any DOCX file's content
tool = DOCXSearchTool()

# OR

# Initialize the tool with a specific DOCX file, so the agent can only search the content of the specified DOCX file
tool = DOCXSearchTool(docx='path/to/your/document.docx')
```

## Arguments
- `docx`: An optional file path to a specific DOCX document you wish to search. If not provided during initialization, the tool allows for later specification of any DOCX file's content path for searching.

docs/tools/DirectoryReadTool.md (new file)

# DirectoryReadTool

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The DirectoryReadTool is a highly efficient utility designed for the comprehensive listing of directory contents. It recursively navigates through the specified directory, providing users with a detailed enumeration of all files, including those nested within subdirectories. This tool is indispensable for tasks requiring a thorough inventory of directory structures or for validating the organization of files within directories.

## Installation
Install the `crewai_tools` package to use the DirectoryReadTool in your project. If you haven't added this package to your environment, you can easily install it with pip using the following command:

```shell
pip install 'crewai[tools]'
```

This installs the latest version of the `crewai_tools` package, allowing access to the DirectoryReadTool and other utilities.

## Example
The DirectoryReadTool is simple to use. The code snippet below shows how to set up and use the tool to list the contents of a specified directory:

```python
from crewai_tools import DirectoryReadTool

# Initialize the tool so the agent can read any directory's content it learns about during execution
tool = DirectoryReadTool()

# OR

# Initialize the tool with a specific directory, so the agent can only read the content of the specified directory
tool = DirectoryReadTool(directory='/path/to/your/directory')
```

## Arguments
The DirectoryReadTool requires minimal configuration for use. The essential argument for this tool is as follows:

- `directory`: **Optional** An argument that specifies the path to the directory whose contents you wish to list. It accepts both absolute and relative paths, guiding the tool to the desired directory for content listing.

33 docs/tools/DirectorySearchTool.md Normal file
@@ -0,0 +1,33 @@
# DirectorySearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is designed to perform a semantic search for queries within the content of a specified directory. Utilizing the RAG (Retrieval-Augmented Generation) methodology, it offers a powerful means to semantically navigate through the files of a given directory. The tool can be dynamically set to search any directory specified at runtime or can be pre-configured to search within a specific directory upon initialization.

## Installation
To start using the DirectorySearchTool, you need to install the crewai_tools package. Execute the following command in your terminal:

```shell
pip install 'crewai[tools]'
```

## Example
The following examples demonstrate how to initialize the DirectorySearchTool for different use cases and how to perform a search:

```python
from crewai_tools import DirectorySearchTool

# To enable searching within any specified directory at runtime
tool = DirectorySearchTool()

# Alternatively, to restrict searches to a specific directory
tool = DirectorySearchTool(directory='/path/to/directory')
```

## Arguments
- `directory` : This string argument specifies the directory within which to search. It is mandatory if the tool has not been initialized with a directory; otherwise, the tool will only search within the initialized directory.
32 docs/tools/FileReadTool.md Normal file
@@ -0,0 +1,32 @@
# FileReadTool

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The FileReadTool is a versatile component of the crewai_tools package, designed to streamline the process of reading and retrieving content from files. It is particularly useful in scenarios such as batch text file processing, runtime configuration file reading, and data importation for analytics. This tool supports various text-based file formats including `.txt`, `.csv`, `.json`, and more, and adapts its behavior to the file type, for instance converting JSON content into a Python dictionary for easy use.

## Installation
Install the crewai_tools package to use the FileReadTool in your projects:

```shell
pip install 'crewai[tools]'
```

## Example
To get started with the FileReadTool:

```python
from crewai_tools import FileReadTool

# Initialize the tool to read any file whose path the agent knows or learns during execution
file_read_tool = FileReadTool()

# OR

# Initialize the tool with a specific file path, so the agent can only read the content of the specified file
file_read_tool = FileReadTool(file_path='path/to/your/file.txt')
```

## Arguments
- `file_path`: The path to the file you want to read. It accepts both absolute and relative paths. Ensure the file exists and you have the necessary permissions to access it.
42 docs/tools/GitHubSearchTool.md Normal file
@@ -0,0 +1,42 @@
# GitHubSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The GitHubSearchTool is a RAG (Retrieval-Augmented Generation) tool specifically designed for conducting semantic searches within GitHub repositories. Utilizing advanced semantic search capabilities, it sifts through code, pull requests, issues, and repositories, making it an essential tool for developers, researchers, or anyone in need of precise information from GitHub.

## Installation
To use the GitHubSearchTool, first ensure the crewai_tools package is installed in your Python environment:

```shell
pip install 'crewai[tools]'
```

This command installs the necessary package to run the GitHubSearchTool along with any other tools included in the crewai_tools package.

## Example
Here's how you can use the GitHubSearchTool to perform semantic searches within a GitHub repository:
```python
from crewai_tools import GitHubSearchTool

# Initialize the tool for semantic searches within a specific GitHub repository
tool = GitHubSearchTool(
    github_repo='https://github.com/example/repo',
    content_types=['code', 'issue']  # Options: code, repo, pr, issue
)

# OR

# Initialize the tool without a repository, so the agent can search any repository it learns about during its execution
tool = GitHubSearchTool(
    content_types=['code', 'issue']  # Options: code, repo, pr, issue
)
```

## Arguments
- `github_repo` : The URL of the GitHub repository where the search will be conducted. This is mandatory unless the tool is initialized without a repository, in which case the agent supplies the target repository at runtime.
- `content_types` : Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code, `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues. This field is mandatory and allows tailoring the search to specific content types within the GitHub repository.
33 docs/tools/JSONSearchTool.md Normal file
@@ -0,0 +1,33 @@
# JSONSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is used to perform a RAG search within a JSON file's content. It allows users to initiate a search with a specific JSON path, focusing the search operation within that particular JSON file. If the path is provided at initialization, the tool restricts its search scope to the specified JSON file, thereby enhancing the precision of search results.

## Installation
Install the crewai_tools package by executing the following command in your terminal:

```shell
pip install 'crewai[tools]'
```

## Example
Below are examples demonstrating how to use the JSONSearchTool for searching within JSON files. You can either search any JSON content or restrict the search to a specific JSON file.

```python
from crewai_tools import JSONSearchTool

# Example 1: Initialize the tool for a general search across any JSON content. This is useful when the path is known or can be discovered during execution.
tool = JSONSearchTool()

# Example 2: Initialize the tool with a specific JSON path, limiting the search to a particular JSON file.
tool = JSONSearchTool(json_path='./path/to/your/file.json')
```

## Arguments
- `json_path` (str): An optional argument that defines the path to the JSON file to be searched. This parameter is only necessary if the tool is initialized without a specific JSON path. Providing this argument restricts the search to the specified JSON file.
35 docs/tools/MDXSearchTool.md Normal file
@@ -0,0 +1,35 @@
# MDXSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The MDX Search Tool, a key component of the `crewai_tools` package, is a RAG tool designed for semantic searching within MDX file content. It helps researchers and writers efficiently locate, read, and organize information stored in MDX documents.

## Installation
To utilize the MDX Search Tool, ensure the `crewai_tools` package is installed. If not already present, install it using the following command:

```shell
pip install 'crewai[tools]'
```

## Example
Using the MDX Search Tool within a crewAI project is straightforward. Here's a simple example:

```python
from crewai_tools import MDXSearchTool

# Initialize the tool so the agent can search within any MDX content it learns about during its execution
tool = MDXSearchTool()

# OR

# Initialize the tool with a specific MDX file path for exclusive search within that document
tool = MDXSearchTool(mdx='path/to/your/document.mdx')
```

## Arguments
- `mdx`: **Optional.** The path to the MDX file to search. It can be provided at initialization.
35 docs/tools/PDFSearchTool.md Normal file
@@ -0,0 +1,35 @@
# PDFSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The PDFSearchTool is a RAG tool designed for semantic searches within PDF content. It allows for inputting a search query and a PDF document, leveraging advanced search techniques to find relevant content efficiently. This capability makes it especially useful for extracting specific information from large PDF files quickly.

## Installation
To get started with the PDFSearchTool, first ensure the crewai_tools package is installed with the following command:

```shell
pip install 'crewai[tools]'
```

## Example
Here's how to use the PDFSearchTool to search within a PDF document:

```python
from crewai_tools import PDFSearchTool

# Initialize the tool, allowing it to search any PDF whose path is provided during execution
tool = PDFSearchTool()

# OR

# Initialize the tool with a specific PDF path for exclusive search within that document
tool = PDFSearchTool(pdf='path/to/your/document.pdf')
```

## Arguments
- `pdf`: **Optional.** The PDF path for the search. It can be provided at initialization or within the `run` method's arguments. If provided at initialization, the tool confines its search to the specified document.
34 docs/tools/PGSearchTool.md Normal file
@@ -0,0 +1,34 @@
# PGSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is designed to facilitate semantic searches within PostgreSQL database tables. Leveraging RAG (Retrieval-Augmented Generation) technology, the PGSearchTool provides users with an efficient means of querying database table content, specifically tailored for PostgreSQL databases. It simplifies the process of finding relevant data through semantic search queries, making it an invaluable resource for users needing to perform advanced queries on extensive datasets within a PostgreSQL database.

## Installation
To install the `crewai_tools` package and utilize the PGSearchTool, execute the following command in your terminal:

```shell
pip install 'crewai[tools]'
```

## Example
Below is an example showcasing how to use the PGSearchTool to conduct a semantic search on a table within a PostgreSQL database:

```python
from crewai_tools import PGSearchTool

# Initialize the tool with the database URI and the target table name
tool = PGSearchTool(db_uri='postgresql://user:password@localhost:5432/mydatabase', table_name='employees')
```

## Arguments
The PGSearchTool requires the following arguments for its operation:

- `db_uri`: A string representing the URI of the PostgreSQL database to be queried. This argument is mandatory and must include the necessary authentication details and the location of the database.
- `table_name`: A string specifying the name of the table within the database on which the semantic search will be performed. This argument is mandatory.
30 docs/tools/ScrapeWebsiteTool.md Normal file
@@ -0,0 +1,30 @@
# ScrapeWebsiteTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
A tool designed to extract and read the content of a specified website. It is capable of handling various types of web pages by making HTTP requests and parsing the received HTML content. This tool can be particularly useful for web scraping tasks, data collection, or extracting specific information from websites.

## Installation
Install the crewai_tools package:
```shell
pip install 'crewai[tools]'
```

## Example
```python
from crewai_tools import ScrapeWebsiteTool

# To enable scraping any website it finds during its execution
tool = ScrapeWebsiteTool()

# Initialize the tool with the website URL, so the agent can only scrape the content of the specified website
tool = ScrapeWebsiteTool(website_url='https://www.example.com')
```

## Arguments
- `website_url` : Mandatory website URL to read. This is the primary input for the tool, specifying which website's content should be scraped and read.
36 docs/tools/SeleniumScrapingTool.md Normal file
@@ -0,0 +1,36 @@
# SeleniumScrapingTool

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is designed for efficient web scraping, enabling users to extract content from web pages. It supports targeted scraping by allowing the specification of a CSS selector for desired elements. The flexibility of the tool enables it to be used on any website URL provided by the user, making it a versatile tool for various web scraping needs.

## Installation
Install the crewai_tools package:
```shell
pip install 'crewai[tools]'
```

## Example
```python
from crewai_tools import SeleniumScrapingTool

# Example 1: Scrape any website it finds during its execution
tool = SeleniumScrapingTool()

# Example 2: Scrape the entire webpage
tool = SeleniumScrapingTool(website_url='https://example.com')

# Example 3: Scrape a specific CSS element from the webpage
tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.main-content')

# Example 4: Scrape using optional parameters for customized scraping
tool = SeleniumScrapingTool(website_url='https://example.com', css_element='.main-content', cookie={'name': 'user', 'value': 'John Doe'})
```

## Arguments
- `website_url`: Mandatory. The URL of the website to scrape; it may be supplied at initialization (as in Examples 2–4) or at runtime.
- `css_element`: Mandatory. The CSS selector for a specific element to scrape from the website; like `website_url`, it may be supplied at initialization or at runtime.
- `cookie`: Optional. A dictionary containing cookie information. This parameter allows the tool to simulate a session with cookie information, providing access to content that may be restricted to logged-in users.
- `wait_time`: Optional. The number of seconds the tool waits after loading the website and after setting a cookie, before scraping the content. This allows dynamic content to load properly; a sketch using it follows this list.
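As referenced above, a brief sketch of the optional `wait_time` parameter; the value is illustrative:

```python
from crewai_tools import SeleniumScrapingTool

# Wait 5 seconds after page load (and after setting any cookie) so dynamic
# content can render before the scrape runs.
tool = SeleniumScrapingTool(
    website_url='https://example.com',
    css_element='.main-content',
    wait_time=5,
)
```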
33 docs/tools/SerperDevTool.md Normal file
@@ -0,0 +1,33 @@
# SerperDevTool Documentation

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is designed to perform semantic searches for a specified query across the internet. It utilizes the [serper.dev](https://serper.dev) API to fetch and display the most relevant search results based on the query provided by the user.

## Installation
To incorporate this tool into your project, follow the installation instructions below:
```shell
pip install 'crewai[tools]'
```

## Example
The following example demonstrates how to initialize the tool and execute a search with a given query:

```python
from crewai_tools import SerperDevTool

# Initialize the tool for internet searching capabilities
tool = SerperDevTool()
```

## Steps to Get Started
To effectively use the `SerperDevTool`, follow these steps:

1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for a free account at `serper.dev`.
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool. A sketch of this setup follows the conclusion below.

## Conclusion
By integrating the `SerperDevTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward.
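As a minimal sketch of step 3 above (the key value is a placeholder; in real projects export `SERPER_API_KEY` in your shell or a `.env` file rather than hard-coding it):

```python
import os

from crewai_tools import SerperDevTool

# Expose the API key named in step 3; the tool reads SERPER_API_KEY
# from the environment. The literal value here is a placeholder.
os.environ["SERPER_API_KEY"] = "your-serper-api-key"

tool = SerperDevTool()
```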
37 docs/tools/TXTSearchTool.md Normal file
@@ -0,0 +1,37 @@
# TXTSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is used to perform a RAG (Retrieval-Augmented Generation) search within the content of a text file. It allows for semantic searching of a query within a specified text file's content, making it an invaluable resource for quickly extracting information or finding specific sections of text based on the query provided.

## Installation
To use the TXTSearchTool, you first need to install the crewai_tools package. This can be done using pip, a package manager for Python. Open your terminal or command prompt and enter the following command:

```shell
pip install 'crewai[tools]'
```

This command will download and install the TXTSearchTool along with any necessary dependencies.

## Example
The following example demonstrates how to use the TXTSearchTool to search within a text file. This example shows both the initialization of the tool with a specific text file and the subsequent search within that file's content.

```python
from crewai_tools import TXTSearchTool

# Initialize the tool to search within any text file's content the agent learns about during its execution
tool = TXTSearchTool()

# OR

# Initialize the tool with a specific text file, so the agent can search within the given text file's content
tool = TXTSearchTool(txt='path/to/text/file.txt')
```

## Arguments
- `txt` (str): **Optional.** The path to the text file you want to search. This argument is only required if the tool was not initialized with a specific text file; otherwise, the search will be conducted within the initially provided text file.
35 docs/tools/WebsiteSearchTool.md Normal file
@@ -0,0 +1,35 @@
# WebsiteSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is specifically crafted for conducting semantic searches within the content of a particular website. Leveraging a Retrieval-Augmented Generation (RAG) model, it navigates through the information provided on a given URL. Users have the flexibility to either initiate a search across any website known or discovered during its usage or to concentrate the search on a predefined, specific website.

## Installation
Install the crewai_tools package by executing the following command in your terminal:

```shell
pip install 'crewai[tools]'
```

## Example
To utilize the WebsiteSearchTool for different use cases, follow these examples:

```python
from crewai_tools import WebsiteSearchTool

# To enable the tool to search any website the agent comes across or learns about during its operation
tool = WebsiteSearchTool()

# OR

# To restrict the tool to only search within the content of a specific website
tool = WebsiteSearchTool(website='https://example.com')
```

## Arguments
- `website` : An optional argument that specifies the website URL to perform the search on. It becomes necessary at runtime if the tool is initialized without a specific website (in the `WebsiteSearchToolSchema`, this argument is mandatory). In the `FixedWebsiteSearchToolSchema`, it is optional when a website is provided during the tool's initialization, as the tool will then only search within the predefined website's content.
35 docs/tools/XMLSearchTool copy.md Normal file
@@ -0,0 +1,35 @@
Duplicate of docs/tools/XMLSearchTool.md; contents identical to the file below.
35 docs/tools/XMLSearchTool.md Normal file
@@ -0,0 +1,35 @@
# XMLSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
The XMLSearchTool is a cutting-edge RAG tool engineered for conducting semantic searches within XML files. Ideal for users needing to parse and extract information from XML content efficiently, this tool supports inputting a search query and an optional XML file path. By specifying an XML path, users can target their search more precisely to the content of that file, thereby obtaining more relevant search outcomes.

## Installation
To start using the XMLSearchTool, you must first install the crewai_tools package. This can be easily done with the following command:

```shell
pip install 'crewai[tools]'
```

## Example
Here are two examples demonstrating how to use the XMLSearchTool. The first shows searching without predefining an XML path, providing flexibility in search scope; the second shows searching within a specific XML file.

```python
from crewai_tools.tools.xml_search_tool import XMLSearchTool

# Allow agents to search within any XML file's content, as they learn the file paths during execution
tool = XMLSearchTool()

# OR

# Initialize the tool with a specific XML file path for exclusive search within that document
tool = XMLSearchTool(xml='path/to/your/xmlfile.xml')
```

## Arguments
- `xml`: This is the path to the XML file you wish to search. It is an optional parameter during the tool's initialization but must be provided either at initialization or as part of the `run` method's arguments to execute a search.
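Since the Arguments note says `xml` may instead be passed to `run`, here is a purely illustrative sketch of that call. Both keyword names (`search_query` and `xml`) are assumptions about the tool's schema, not confirmed by this page:

```python
from crewai_tools.tools.xml_search_tool import XMLSearchTool

tool = XMLSearchTool()

# Hypothetical direct invocation; keyword names are assumed for illustration only.
result = tool.run(search_query='warranty terms', xml='path/to/your/xmlfile.xml')
print(result)
```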
35 docs/tools/YoutubeChannelSearchTool.md Normal file
@@ -0,0 +1,35 @@
# YoutubeChannelSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description
This tool is designed to perform semantic searches within a specific Youtube channel's content. Leveraging the RAG (Retrieval-Augmented Generation) methodology, it provides relevant search results, making it invaluable for extracting information or finding specific content without the need to manually sift through videos. It streamlines the search process within Youtube channels, catering to researchers, content creators, and viewers seeking specific information or topics.

## Installation
To utilize the YoutubeChannelSearchTool, the `crewai_tools` package must be installed. Execute the following command in your shell to install:

```shell
pip install 'crewai[tools]'
```

## Example
To begin using the YoutubeChannelSearchTool, follow the example below. This demonstrates initializing the tool both with and without a specific Youtube channel handle.

```python
from crewai_tools import YoutubeChannelSearchTool

# Initialize the tool to search within any Youtube channel's content the agent learns about during its execution
tool = YoutubeChannelSearchTool()

# OR

# Initialize the tool with a specific Youtube channel handle to target your search
tool = YoutubeChannelSearchTool(youtube_channel_handle='@exampleChannel')
```

## Arguments
- `youtube_channel_handle` : A string representing the Youtube channel handle. It is mandatory at runtime if not provided at initialization; when it is provided at initialization, the tool only searches within the content of the specified channel.
38 docs/tools/YoutubeVideoSearchTool.md Normal file
@@ -0,0 +1,38 @@
# YoutubeVideoSearchTool

!!! note "Depends on OpenAI"
    At the moment, all RAG tools can only use OpenAI to generate embeddings; we are working on adding support for other providers.

!!! note "Experimental"
    We are still working on improving tools, so there might be unexpected behavior or changes in the future.

## Description

This tool is part of the `crewai_tools` package and is designed to perform semantic searches within Youtube video content, utilizing Retrieval-Augmented Generation (RAG) techniques. It is one of several "Search" tools in the package that leverage RAG for different sources. The YoutubeVideoSearchTool allows for flexibility in searches; users can search across any Youtube video content without specifying a video URL, or they can target their search to a specific Youtube video by providing its URL.

## Installation

To utilize the YoutubeVideoSearchTool, you must first install the `crewai_tools` package. This package contains the YoutubeVideoSearchTool among other utilities designed to enhance your data analysis and processing tasks. Install the package by executing the following command in your terminal:

```shell
pip install 'crewai[tools]'
```

## Example

To integrate the YoutubeVideoSearchTool into your Python projects, follow the example below. This demonstrates how to use the tool both for general Youtube content searches and for targeted searches within a specific video's content.

```python
from crewai_tools import YoutubeVideoSearchTool

# General search across Youtube content without specifying a video URL, so the agent can search within any Youtube video whose URL it learns during its operation
tool = YoutubeVideoSearchTool()

# Targeted search within a specific Youtube video's content
tool = YoutubeVideoSearchTool(youtube_video_url='https://youtube.com/watch?v=example')
```

## Arguments

The YoutubeVideoSearchTool accepts the following initialization arguments:

- `youtube_video_url`: An optional argument at initialization, but required if targeting a specific Youtube video. It specifies the Youtube video URL you want to search within.
21 mkdocs.yml
@@ -128,11 +128,32 @@ nav:
    - Collaboration: 'core-concepts/Collaboration.md'
  - How to Guides:
    - Getting Started: 'how-to/Creating-a-Crew-and-kick-it-off.md'
    - Create Custom Tools: 'how-to/Create-Custom-Tools.md'
    - Using Sequential Process: 'how-to/Sequential.md'
    - Using Hierarchical Process: 'how-to/Hierarchical.md'
    - Connecting to any LLM: 'how-to/LLM-Connections.md'
    - Customizing Agents: 'how-to/Customizing-Agents.md'
    - Human Input on Execution: 'how-to/Human-Input-on-Execution.md'
  - Tools Docs:
    - Google Serper Search: 'tools/SerperDevTool.md'
    - Scrape Website: 'tools/ScrapeWebsiteTool.md'
    - Directory Read: 'tools/DirectoryReadTool.md'
    - File Read: 'tools/FileReadTool.md'
    - Selenium Scraper: 'tools/SeleniumScrapingTool.md'
    - Directory RAG Search: 'tools/DirectorySearchTool.md'
    - PDF RAG Search: 'tools/PDFSearchTool.md'
    - TXT RAG Search: 'tools/TXTSearchTool.md'
    - CSV RAG Search: 'tools/CSVSearchTool.md'
    - XML RAG Search: 'tools/XMLSearchTool.md'
    - JSON RAG Search: 'tools/JSONSearchTool.md'
    - Docx RAG Search: 'tools/DOCXSearchTool.md'
    - MDX RAG Search: 'tools/MDXSearchTool.md'
    - PG RAG Search: 'tools/PGSearchTool.md'
    - Website RAG Search: 'tools/WebsiteSearchTool.md'
    - Github RAG Search: 'tools/GitHubSearchTool.md'
    - Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
    - Youtube Video RAG Search: 'tools/YoutubeVideoSearchTool.md'
    - Youtube Channel RAG Search: 'tools/YoutubeChannelSearchTool.md'
  - Examples:
    - Trip Planner Crew: https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner
    - Create Instagram Post: https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post
3448 poetry.lock generated
File diff suppressed because it is too large
@@ -1,7 +1,6 @@
[tool.poetry]
name = "crewai"
version = "0.5.5"
version = "0.22.5"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
@@ -9,25 +8,32 @@ packages = [
    { include = "crewai", from = "src" },
]

[tool.poetry.urls]
Homepage = "https://crewai.io"
Homepage = "https://crewai.com"
Documentation = "https://github.com/joaomdmoura/CrewAI/wiki/Index"
Repository = "https://github.com/joaomdmoura/crewai"

[tool.poetry.dependencies]
python = ">=3.10,<4.0"
python = ">=3.10,<=3.13"
pydantic = "^2.4.2"
langchain = "0.1.0"
openai = "^1.7.1"
langchain-openai = "^0.0.2"
langchain = "^0.1.10"
openai = "^1.13.3"
langchain-openai = "^0.0.5"
opentelemetry-api = "^1.22.0"
opentelemetry-sdk = "^1.22.0"
opentelemetry-exporter-otlp-proto-http = "^1.22.0"
instructor = "^0.5.2"
regex = "^2023.12.25"
crewai-tools = { version = "^0.0.15", optional = true }
click = "^8.1.7"
python-dotenv = "1.0.0"

[tool.poetry.extras]
tools = ["crewai-tools"]

[tool.poetry.group.dev.dependencies]
isort = "^5.13.2"
pyright = "1.1.333"
pyright = ">=1.1.350,<2.0.0"
black = {git = "https://github.com/psf/black.git", rev = "stable"}
autoflake = "^2.2.1"
pre-commit = "^3.6.0"
@@ -38,16 +44,20 @@ mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
mkdocs-material-extensions = "^1.3.1"
pillow = "^10.2.0"
cairosvg = "^2.7.1"
crewai_tools = "^0.0.15"

[tool.isort]
profile = "black"
known_first_party = ["crewai"]

[tool.poetry.group.test.dependencies]
pytest = "^7.4"
pytest = "^8.0.0"
pytest-vcr = "^1.0.2"
python-dotenv = "1.0.0"

[tool.poetry.scripts]
crewai = "crewai.cli.cli:crewai"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
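Given the dependency groups and the `tools` extra declared above, a local development install might look like this (standard Poetry ≥ 1.2 flags, not project-specific commands):

```shell
poetry install --with dev,test -E tools
```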
@@ -1,11 +1,13 @@
import os
import uuid
from typing import Any, List, Optional
from typing import Any, Dict, List, Optional, Tuple

from langchain.agents.agent import RunnableAgent
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.runnables.config import RunnableConfig
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from pydantic import (
    UUID4,
@@ -19,13 +21,9 @@ from pydantic import (
)
from pydantic_core import PydanticCustomError

from crewai.agents import (
    CacheHandler,
    CrewAgentExecutor,
    CrewAgentOutputParser,
    ToolsHandler,
)
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess


class Agent(BaseModel):
@@ -39,20 +37,26 @@
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        config: Dict representation of agent configuration.
        llm: The language model that will run the agent.
        function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew's function_calling_llm.
        max_iter: Maximum number of iterations for an agent to execute a task.
        memory: Whether the agent should have memory or not.
        max_rpm: Maximum number of requests per minute for the agent execution to be respected.
        verbose: Whether the agent execution should be in verbose mode.
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at the agent's disposal.
        step_callback: Callback to be executed after each step of the agent execution.
        callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process.
    """

    __hash__ = object.__hash__  # type: ignore
    _logger: Logger = PrivateAttr()
    _rpm_controller: RPMController = PrivateAttr(default=None)
    _request_within_rpm_limit: Any = PrivateAttr(default=None)
    _token_process: TokenProcess = TokenProcess()

    formatting_errors: int = 0
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
@@ -62,12 +66,16 @@
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    config: Optional[Dict[str, Any]] = Field(
        description="Configuration for the agent",
        default=None,
    )
    max_rpm: Optional[int] = Field(
        default=None,
        description="Maximum number of requests per minute for the agent execution to be respected.",
    )
    memory: bool = Field(
        default=True, description="Whether the agent should have memory or not"
        default=False, description="Whether the agent should have memory or not"
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
@@ -75,7 +83,7 @@
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: List[Any] = Field(
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents disposal"
    )
    max_iter: Optional[int] = Field(
@@ -90,13 +98,27 @@
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=CacheHandler(), description="An instance of the CacheHandler class."
    )
    step_callback: Optional[Any] = Field(
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
            model="gpt-4",
            model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4")
        ),
        description="Language model that will run the agent.",
    )
    function_calling_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
        default=None, description="Callback to be executed"
    )

    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
        super().__init__(**config, **data)

    @field_validator("id", mode="before")
    @classmethod
@@ -106,6 +128,14 @@
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Agent":
        """Set attributes based on the agent configuration."""
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self

    @model_validator(mode="after")
    def set_private_attrs(self):
        """Set private attributes."""
@@ -117,15 +147,19 @@
        return self

    @model_validator(mode="after")
    def check_agent_executor(self) -> "Agent":
        """Check if the agent executor is set."""
    def set_agent_executor(self) -> "Agent":
        """Set the agent executor."""
        if hasattr(self.llm, "model_name"):
            self.llm.callbacks = [
                TokenCalcHandler(self.llm.model_name, self._token_process)
            ]
        if not self.agent_executor:
            self.set_cache_handler(self.cache_handler)
        return self

    def execute_task(
        self,
        task: str,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
@@ -139,22 +173,29 @@
        Returns:
            Output of the agent
        """
        self.tools_handler.last_used_tool = {}

        task_prompt = task.prompt()

        if context:
            task = self.i18n.slice("task_with_context").format(
                task=task, context=context
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context
            )

        tools = tools or self.tools
        tools = self._parse_tools(tools or self.tools)
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = tools
        self.agent_executor.task = task

        self.agent_executor.tools_description = render_text_description(tools)
        self.agent_executor.tools_names = self.__tools_names(tools)

        result = self.agent_executor.invoke(
            {
                "input": task,
                "tool_names": self.__tools_names(tools),
                "tools": render_text_description(tools),
            },
            RunnableConfig(callbacks=[self.tools_handler]),
                "input": task_prompt,
                "tool_names": self.agent_executor.tools_names,
                "tools": self.agent_executor.tools_description,
            }
        )["output"]

        if self.max_rpm:
@@ -170,7 +211,7 @@
        """
        self.cache_handler = cache_handler
        self.tools_handler = ToolsHandler(cache=self.cache_handler)
        self._create_agent_executor()
        self.create_agent_executor()

    def set_rpm_controller(self, rpm_controller: RPMController) -> None:
        """Set the rpm controller for the agent.
@@ -180,32 +221,42 @@
        """
        if not self._rpm_controller:
            self._rpm_controller = rpm_controller
            self._create_agent_executor()
            self.create_agent_executor()

    def _create_agent_executor(self) -> None:
    def create_agent_executor(self, tools=None) -> None:
        """Create an agent executor for the agent.

        Returns:
            An instance of the CrewAgentExecutor class.
        """
        tools = tools or self.tools

        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
            "agent_scratchpad": lambda x: self.format_log_to_str(
                x["intermediate_steps"]
            ),
        }

        executor_args = {
            "llm": self.llm,
            "i18n": self.i18n,
            "tools": self.tools,
            "tools": self._parse_tools(tools),
            "verbose": self.verbose,
            "handle_parsing_errors": True,
            "max_iterations": self.max_iter,
            "step_callback": self.step_callback,
            "tools_handler": self.tools_handler,
            "function_calling_llm": self.function_calling_llm,
            "callbacks": self.callbacks,
        }

        if self._rpm_controller:
            executor_args["request_within_rpm_limit"] = (
                self._rpm_controller.check_or_wait
            )
            executor_args[
                "request_within_rpm_limit"
            ] = self._rpm_controller.check_or_wait

        if self.memory:
            summary_memory = ConversationSummaryMemory(
@@ -213,9 +264,9 @@
            )
            executor_args["memory"] = summary_memory
            agent_args["chat_history"] = lambda x: x["chat_history"]
            prompt = Prompts(i18n=self.i18n).task_execution_with_memory()
            prompt = Prompts(i18n=self.i18n, tools=tools).task_execution_with_memory()
        else:
            prompt = Prompts(i18n=self.i18n).task_execution()
            prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()

        execution_prompt = prompt.partial(
            goal=self.goal,
@@ -224,20 +275,55 @@
        )

        bind = self.llm.bind(stop=[self.i18n.slice("observation")])
        inner_agent = (
            agent_args
            | execution_prompt
            | bind
            | CrewAgentOutputParser(
                tools_handler=self.tools_handler,
                cache=self.cache_handler,
                i18n=self.i18n,
            )
        )
        inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
        self.agent_executor = CrewAgentExecutor(
            agent=RunnableAgent(runnable=inner_agent), **executor_args
        )

    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolate inputs into the agent description and backstory."""
        if inputs:
            self.role = self.role.format(**inputs)
            self.goal = self.goal.format(**inputs)
            self.backstory = self.backstory.format(**inputs)

    def increment_formatting_errors(self) -> None:
        """Count the formatting errors of the agent."""
        self.formatting_errors += 1

    def format_log_to_str(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        observation_prefix: str = "Observation: ",
        llm_prefix: str = "",
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts

    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
        """Parse tools to be used for the task."""
        # tentatively try to import from crewai_tools import BaseTool as CrewAITool
        tools_list = []
        try:
            from crewai_tools import BaseTool as CrewAITool

            for tool in tools:
                if isinstance(tool, CrewAITool):
                    tools_list.append(tool.to_langchain())
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
            for tool in tools:
                tools_list.append(tool)
        return tools_list

    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])

    def __repr__(self):
        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
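As a minimal sketch of what the new `interpolate_inputs` method enables, based on the method body above (all strings are illustrative; the method simply applies `str.format` to `role`, `goal`, and `backstory`):

```python
from crewai import Agent

agent = Agent(
    role="{topic} Researcher",
    goal="Uncover the latest developments in {topic}",
    backstory="A specialist tracking {topic} news.",
)

# Fill the {topic} placeholders; under the hood this is str.format(**inputs).
agent.interpolate_inputs({"topic": "AI agents"})
```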
@@ -1,4 +1,4 @@
from .cache.cache_handler import CacheHandler
from .executor import CrewAgentExecutor
from .output_parser import CrewAgentOutputParser
from .parser import CrewAgentParser
from .tools_handler import ToolsHandler
1 src/crewai/agents/cache/__init__.py vendored
@@ -1,2 +1 @@
from .cache_handler import CacheHandler
from .cache_hit import CacheHit
2 src/crewai/agents/cache/cache_handler.py vendored
@@ -10,9 +10,7 @@ class CacheHandler:
        self._cache = {}

    def add(self, tool, input, output):
        input = input.strip()
        self._cache[f"{tool}-{input}"] = output

    def read(self, tool, input) -> Optional[str]:
        input = input.strip()
        return self._cache.get(f"{tool}-{input}")
18 src/crewai/agents/cache/cache_hit.py vendored
@@ -1,18 +0,0 @@
from typing import Any

from pydantic import BaseModel, Field

from .cache_handler import CacheHandler


class CacheHit(BaseModel):
    """Cache Hit Object."""

    class Config:
        arbitrary_types_allowed = True

    # Making it Any instead of AgentAction to avoid
    # pydantic v1 vs v2 incompatibility; langchain should
    # soon be updated to pydantic v2
    action: Any = Field(description="Action taken")
    cache: CacheHandler = Field(description="Cache Handler for the tool")
@@ -1,30 +0,0 @@
from langchain_core.exceptions import OutputParserException

from crewai.utilities import I18N


class TaskRepeatedUsageException(OutputParserException):
    """Exception raised when a task is used twice in a row."""

    i18n: I18N = I18N()
    error: str = "TaskRepeatedUsageException"
    message: str

    def __init__(self, i18n: I18N, tool: str, tool_input: str, text: str):
        self.i18n = i18n
        self.text = text
        self.tool = tool
        self.tool_input = tool_input
        self.message = self.i18n.errors("task_repeated_usage").format(
            tool=tool, tool_input=tool_input
        )

        super().__init__(
            error=self.error,
            observation=self.message,
            send_to_llm=True,
            llm_output=self.text,
        )

    def __str__(self):
        return self.message
@@ -3,25 +3,33 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

from langchain.agents import AgentExecutor
from langchain.agents.agent import ExceptionTool
from langchain.agents.tools import InvalidTool
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
from langchain_core.exceptions import OutputParserException
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf

from crewai.agents.cache.cache_hit import CacheHit
from crewai.tools.cache_tools import CacheTools
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N


class CrewAgentExecutor(AgentExecutor):
    i18n: I18N = I18N()
    _i18n: I18N = I18N()
    llm: Any = None
    iterations: int = 0
    task: Any = None
    tools_description: str = ""
    tools_names: str = ""
    function_calling_llm: Any = None
    request_within_rpm_limit: Any = None
    tools_handler: InstanceOf[ToolsHandler] = None
    max_iterations: Optional[int] = 15
    have_forced_answer: bool = False
    force_answer_max_iterations: Optional[int] = None
    step_callback: Optional[Any] = None

    @root_validator()
    def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -29,12 +37,9 @@ class CrewAgentExecutor(AgentExecutor):
        return values

    def _should_force_answer(self) -> bool:
        return True if self.iterations == self.force_answer_max_iterations else False

    def _force_answer(self, output: AgentAction):
        return AgentStep(
            action=output, observation=self.i18n.errors("force_final_answer")
        )
        return (
            self.iterations == self.force_answer_max_iterations
        ) and not self.have_forced_answer

    def _call(
        self,
@@ -63,6 +68,10 @@ class CrewAgentExecutor(AgentExecutor):
                intermediate_steps,
                run_manager=run_manager,
            )

            if self.step_callback:
                self.step_callback(next_step_output)

            if isinstance(next_step_output, AgentFinish):
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
@@ -97,25 +106,20 @@ class CrewAgentExecutor(AgentExecutor):
        Override this to take control of how the agent makes and acts on choices.
        """
        try:
            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
            if self._should_force_answer():
                error = self._i18n.errors("force_final_answer")
                output = AgentAction("_Exception", error, error)
                self.have_forced_answer = True
                yield AgentStep(action=output, observation=error)
                return

            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
            # Call the LLM to see what to do.
            output = self.agent.plan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )
            if self._should_force_answer():
                if isinstance(output, AgentAction) or isinstance(output, AgentFinish):
                    output = output
                elif isinstance(output, CacheHit):
                    output = output.action
                else:
                    raise ValueError(
                        f"Unexpected output type from agent: {type(output)}"
                    )
                yield self._force_answer(output)
                return

        except OutputParserException as e:
            if isinstance(self.handle_parsing_errors, bool):
@@ -129,33 +133,35 @@ class CrewAgentExecutor(AgentExecutor):
|
||||
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
|
||||
f"This is the error: {str(e)}"
|
||||
)
|
||||
text = str(e)
|
||||
str(e)
|
||||
if isinstance(self.handle_parsing_errors, bool):
|
||||
if e.send_to_llm:
|
||||
observation = str(e.observation)
|
||||
text = str(e.llm_output)
|
||||
observation = f"\n{str(e.observation)}"
|
||||
str(e.llm_output)
|
||||
else:
|
||||
observation = "Invalid or incomplete response"
|
||||
observation = ""
|
||||
elif isinstance(self.handle_parsing_errors, str):
|
||||
observation = self.handle_parsing_errors
|
||||
observation = f"\n{self.handle_parsing_errors}"
|
||||
elif callable(self.handle_parsing_errors):
|
||||
observation = self.handle_parsing_errors(e)
|
||||
observation = f"\n{self.handle_parsing_errors(e)}"
|
||||
else:
|
||||
raise ValueError("Got unexpected type of `handle_parsing_errors`")
|
||||
output = AgentAction("_Exception", observation, text)
|
||||
output = AgentAction("_Exception", observation, "")
|
||||
if run_manager:
|
||||
run_manager.on_agent_action(output, color="green")
|
||||
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
||||
observation = ExceptionTool().run(
|
||||
output.tool_input,
|
||||
verbose=self.verbose,
|
||||
verbose=False,
|
||||
color=None,
|
||||
callbacks=run_manager.get_child() if run_manager else None,
|
||||
**tool_run_kwargs,
|
||||
)
|
||||
|
||||
if self._should_force_answer():
|
||||
yield self._force_answer(output)
|
||||
error = self._i18n.errors("force_final_answer")
|
||||
output = AgentAction("_Exception", error, error)
|
||||
yield AgentStep(action=output, observation=error)
|
||||
return
|
||||
|
||||
yield AgentStep(action=output, observation=observation)
|
||||
@@ -166,17 +172,6 @@ class CrewAgentExecutor(AgentExecutor):
|
||||
yield output
|
||||
return
|
||||
|
||||
# Override tool usage to use CacheTools
|
||||
if isinstance(output, CacheHit):
|
||||
cache = output.cache
|
||||
action = output.action
|
||||
tool = CacheTools(cache_handler=cache).tool()
|
||||
output = action.copy()
|
||||
output.tool_input = f"tool:{action.tool}|input:{action.tool_input}"
|
||||
output.tool = tool.name
|
||||
name_to_tool_map[tool.name] = tool
|
||||
color_mapping[tool.name] = color_mapping[action.tool]
|
||||
|
||||
actions: List[AgentAction]
|
||||
actions = [output] if isinstance(output, AgentAction) else output
|
||||
yield from actions
|
||||
@@ -184,31 +179,27 @@ class CrewAgentExecutor(AgentExecutor):
|
||||
if run_manager:
|
||||
run_manager.on_agent_action(agent_action, color="green")
|
||||
# Otherwise we lookup the tool
|
||||
if agent_action.tool in name_to_tool_map:
|
||||
tool = name_to_tool_map[agent_action.tool]
|
||||
return_direct = tool.return_direct
|
||||
color = color_mapping[agent_action.tool]
|
||||
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
||||
if return_direct:
|
||||
tool_run_kwargs["llm_prefix"] = ""
|
||||
# We then call the tool on the tool input to get an observation
|
||||
observation = tool.run(
|
||||
agent_action.tool_input,
|
||||
verbose=self.verbose,
|
||||
color=color,
|
||||
callbacks=run_manager.get_child() if run_manager else None,
|
||||
**tool_run_kwargs,
|
||||
)
|
||||
tool_usage = ToolUsage(
|
||||
tools_handler=self.tools_handler,
|
||||
tools=self.tools,
|
||||
tools_description=self.tools_description,
|
||||
tools_names=self.tools_names,
|
||||
function_calling_llm=self.function_calling_llm,
|
||||
task=self.task,
|
||||
action=agent_action,
|
||||
)
|
||||
tool_calling = tool_usage.parse(agent_action.log)
|
||||
|
||||
if isinstance(tool_calling, ToolUsageErrorException):
|
||||
observation = tool_calling.message
|
||||
else:
|
||||
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
||||
observation = InvalidTool().run(
|
||||
{
|
||||
"requested_tool_name": agent_action.tool,
|
||||
"available_tool_names": list(name_to_tool_map.keys()),
|
||||
},
|
||||
verbose=self.verbose,
|
||||
color=None,
|
||||
callbacks=run_manager.get_child() if run_manager else None,
|
||||
**tool_run_kwargs,
|
||||
)
|
||||
if tool_calling.tool_name.lower().strip() in [
|
||||
name.lower().strip() for name in name_to_tool_map
|
||||
]:
|
||||
observation = tool_usage.use(tool_calling, agent_action.log)
|
||||
else:
|
||||
observation = self._i18n.errors("wrong_tool_name").format(
|
||||
tool=tool_calling.tool_name,
|
||||
tools=", ".join([tool.name for tool in self.tools]),
|
||||
)
|
||||
yield AgentStep(action=agent_action, observation=observation)
|
||||
|
||||
@@ -1,79 +0,0 @@
|
||||
import re
|
||||
from typing import Union
|
||||
|
||||
from langchain.agents.output_parsers import ReActSingleInputOutputParser
|
||||
from langchain_core.agents import AgentAction, AgentFinish
|
||||
|
||||
from crewai.agents.cache import CacheHandler, CacheHit
|
||||
from crewai.agents.exceptions import TaskRepeatedUsageException
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.utilities import I18N
|
||||
|
||||
FINAL_ANSWER_ACTION = "Final Answer:"
|
||||
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
|
||||
"Parsing LLM output produced both a final answer and a parse-able action:"
|
||||
)
|
||||
|
||||
|
||||
class CrewAgentOutputParser(ReActSingleInputOutputParser):
|
||||
"""Parses ReAct-style LLM calls that have a single tool input.
|
||||
|
||||
Expects output to be in one of two formats.
|
||||
|
||||
If the output signals that an action should be taken,
|
||||
should be in the below format. This will result in an AgentAction
|
||||
being returned.
|
||||
|
||||
```
|
||||
Thought: agent thought here
|
||||
Action: search
|
||||
Action Input: what is the temperature in SF?
|
||||
```
|
||||
|
||||
If the output signals that a final answer should be given,
|
||||
should be in the below format. This will result in an AgentFinish
|
||||
being returned.
|
||||
|
||||
```
|
||||
Thought: agent thought here
|
||||
Final Answer: The temperature is 100 degrees
|
||||
```
|
||||
|
||||
It also prevents tools from being reused in a roll.
|
||||
"""
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
tools_handler: ToolsHandler
|
||||
cache: CacheHandler
|
||||
i18n: I18N
|
||||
|
||||
def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
|
||||
regex = (
|
||||
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
|
||||
)
|
||||
if action_match := re.search(regex, text, re.DOTALL):
|
||||
action = action_match.group(1).strip()
|
||||
action_input = action_match.group(2)
|
||||
tool_input = action_input.strip(" ")
|
||||
tool_input = tool_input.strip('"')
|
||||
|
||||
if last_tool_usage := self.tools_handler.last_used_tool:
|
||||
usage = {
|
||||
"tool": action,
|
||||
"input": tool_input,
|
||||
}
|
||||
if usage == last_tool_usage:
|
||||
raise TaskRepeatedUsageException(
|
||||
text=text,
|
||||
tool=action,
|
||||
tool_input=tool_input,
|
||||
i18n=self.i18n,
|
||||
)
|
||||
|
||||
if self.cache.read(action, tool_input):
|
||||
action = AgentAction(action, tool_input, text)
|
||||
return CacheHit(action=action, cache=self.cache)
|
||||
|
||||
return super().parse(text)
|
||||
src/crewai/agents/parser.py (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
import re
|
||||
from typing import Any, Union
|
||||
|
||||
from langchain.agents.output_parsers import ReActSingleInputOutputParser
|
||||
from langchain_core.agents import AgentAction, AgentFinish
|
||||
from langchain_core.exceptions import OutputParserException
|
||||
|
||||
from crewai.utilities import I18N
|
||||
|
||||
FINAL_ANSWER_ACTION = "Final Answer:"
|
||||
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do right next, and don't use a tool I have already used.\n"
|
||||
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action Input:' after 'Action:'. I will do right next, and don't use a tool I have already used.\n"
|
||||
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other"
|
||||
|
||||
|
||||
class CrewAgentParser(ReActSingleInputOutputParser):
|
||||
"""Parses ReAct-style LLM calls that have a single tool input.
|
||||
|
||||
Expects output to be in one of two formats.
|
||||
|
||||
If the output signals that an action should be taken,
|
||||
should be in the below format. This will result in an AgentAction
|
||||
being returned.
|
||||
|
||||
Thought: agent thought here
|
||||
Action: search
|
||||
Action Input: what is the temperature in SF?
|
||||
|
||||
If the output signals that a final answer should be given,
|
||||
should be in the below format. This will result in an AgentFinish
|
||||
being returned.
|
||||
|
||||
Thought: agent thought here
|
||||
Final Answer: The temperature is 100 degrees
|
||||
"""
|
||||
|
||||
_i18n: I18N = I18N()
|
||||
agent: Any = None
|
||||
|
||||
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
|
||||
includes_answer = FINAL_ANSWER_ACTION in text
|
||||
regex = (
|
||||
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
|
||||
)
|
||||
action_match = re.search(regex, text, re.DOTALL)
|
||||
if action_match:
|
||||
if includes_answer:
|
||||
raise OutputParserException(
|
||||
f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}: {text}"
|
||||
)
|
||||
action = action_match.group(1).strip()
|
||||
action_input = action_match.group(2)
|
||||
tool_input = action_input.strip(" ")
|
||||
tool_input = tool_input.strip('"')
|
||||
|
||||
return AgentAction(action, tool_input, text)
|
||||
|
||||
elif includes_answer:
|
||||
return AgentFinish(
|
||||
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
|
||||
)
|
||||
|
||||
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
|
||||
self.agent.increment_formatting_errors()
|
||||
raise OutputParserException(
|
||||
f"Could not parse LLM output: `{text}`",
|
||||
observation=f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
|
||||
llm_output=text,
|
||||
send_to_llm=True,
|
||||
)
|
||||
elif not re.search(
|
||||
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
|
||||
):
|
||||
self.agent.increment_formatting_errors()
|
||||
raise OutputParserException(
|
||||
f"Could not parse LLM output: `{text}`",
|
||||
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
|
||||
llm_output=text,
|
||||
send_to_llm=True,
|
||||
)
|
||||
else:
|
||||
format = self._i18n.slice("format_without_tools")
|
||||
error = f"{format}"
|
||||
self.agent.increment_formatting_errors()
|
||||
raise OutputParserException(
|
||||
error,
|
||||
observation=error,
|
||||
llm_output=text,
|
||||
send_to_llm=True,
|
||||
)
|
||||
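As a quick illustration of the behavior described in the docstring, a well-formed action block parses into an `AgentAction`. This is only a sketch: `agent` is left as None, which is safe solely for inputs that parse cleanly (the error paths above call `self.agent.increment_formatting_errors()`).

```python
from crewai.agents.parser import CrewAgentParser  # path per src/crewai/agents/parser.py above
from langchain_core.agents import AgentAction

parser = CrewAgentParser()
text = (
    "Thought: I need current weather data\n"
    "Action: search\n"
    'Action Input: "what is the temperature in SF?"'
)
step = parser.parse(text)
assert isinstance(step, AgentAction)
print(step.tool, "->", step.tool_input)  # search -> what is the temperature in SF?
```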
@@ -1,44 +1,27 @@
from typing import Any, Dict

from langchain.callbacks.base import BaseCallbackHandler
from typing import Any

from ..tools.cache_tools import CacheTools
from ..tools.tool_calling import ToolCalling
from .cache.cache_handler import CacheHandler


class ToolsHandler(BaseCallbackHandler):
class ToolsHandler:
    """Callback handler for tool usage."""

    last_used_tool: Dict[str, Any] = {}
    last_used_tool: ToolCalling = {}
    cache: CacheHandler

    def __init__(self, cache: CacheHandler, **kwargs: Any):
    def __init__(self, cache: CacheHandler):
        """Initialize the callback handler."""
        self.cache = cache
        super().__init__(**kwargs)
        self.last_used_tool = {}

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Run when tool starts running."""
        name = serialized.get("name")
        if name not in ["invalid_tool", "_Exception"]:
            tools_usage = {
                "tool": name,
                "input": input_str,
            }
            self.last_used_tool = tools_usage

    def on_tool_end(self, output: str, **kwargs: Any) -> Any:
    def on_tool_use(self, calling: ToolCalling, output: str) -> Any:
        """Run when tool ends running."""
        if (
            "is not a valid tool" not in output
            and "Invalid or incomplete response" not in output
            and "Invalid Format" not in output
        ):
            if self.last_used_tool["tool"] != CacheTools().name:
                self.cache.add(
                    tool=self.last_used_tool["tool"],
                    input=self.last_used_tool["input"],
                    output=output,
                )
        self.last_used_tool = calling
        if calling.tool_name != CacheTools().name:
            self.cache.add(
                tool=calling.tool_name,
                input=calling.arguments,
                output=output,
            )
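A rough sketch of the new cache flow follows. How `ToolCalling` is constructed is an assumption here; the diff only shows its `tool_name` and `arguments` attributes being read.

```python
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_calling import ToolCalling  # import path taken from the diff above

handler = ToolsHandler(cache=CacheHandler())

# Hypothetical construction: only .tool_name and .arguments appear in the diff.
calling = ToolCalling(tool_name="search", arguments="what is the temperature in SF?")
handler.on_tool_use(calling=calling, output="It is 100 degrees")

# The call was recorded as last_used_tool and its output cached under (tool, input):
print(handler.cache.read("search", "what is the temperature in SF?"))
```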
src/crewai/cli/__init__.py (new file, 0 lines)
src/crewai/cli/cli.py (new file, 19 lines)
@@ -0,0 +1,19 @@
import click

from .create_crew import create_crew


@click.group()
def crewai():
    """Top-level command group for crewai."""


@crewai.command()
@click.argument("project_name")
def create(project_name):
    """Create a new crew."""
    create_crew(project_name)


if __name__ == "__main__":
    crewai()
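For a quick sanity check of the new command group, click's bundled test runner can drive it in-process; the invocation below mirrors `crewai create <project_name>` from a shell.

```python
from click.testing import CliRunner

from crewai.cli.cli import crewai  # module path per src/crewai/cli/cli.py above

runner = CliRunner()
result = runner.invoke(crewai, ["create", "my_project"])
print(result.output)  # scaffolding progress messages emitted by create_crew()
```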
src/crewai/cli/create_crew.py (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
|
||||
def create_crew(name):
|
||||
"""Create a new crew."""
|
||||
folder_name = name.replace(" ", "_").replace("-", "_").lower()
|
||||
class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
|
||||
|
||||
click.secho(f"Creating folder {folder_name}...", fg="green", bold=True)
|
||||
|
||||
if not os.path.exists(folder_name):
|
||||
os.mkdir(folder_name)
|
||||
os.mkdir(folder_name + "/tests")
|
||||
os.mkdir(folder_name + "/src")
|
||||
os.mkdir(folder_name + "/src/tools")
|
||||
os.mkdir(folder_name + "/src/config")
|
||||
with open(folder_name + "/.env", "w") as file:
|
||||
file.write("OPENAI_API_KEY=YOUR_API_KEY")
|
||||
else:
|
||||
click.secho(
|
||||
f"\tFolder {folder_name} already exists. Please choose a different name.",
|
||||
fg="red",
|
||||
)
|
||||
return
|
||||
|
||||
package_dir = Path(__file__).parent
|
||||
templates_dir = package_dir / "templates"
|
||||
|
||||
# List of template files to copy
|
||||
root_template_files = [
|
||||
".gitignore",
|
||||
"pyproject.toml",
|
||||
"README.md",
|
||||
]
|
||||
tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
|
||||
config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
|
||||
src_template_files = ["__init__.py", "main.py", "crew.py"]
|
||||
|
||||
for file_name in root_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
for file_name in src_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / "src" / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
for file_name in tools_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / "src" / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
for file_name in config_template_files:
|
||||
src_file = templates_dir / file_name
|
||||
dst_file = Path(folder_name) / "src" / file_name
|
||||
copy_template(src_file, dst_file, name, class_name, folder_name)
|
||||
|
||||
click.secho(f"Crew {name} created successfully!", fg="green", bold=True)
|
||||
|
||||
|
||||
def copy_template(src, dst, name, class_name, folder_name):
|
||||
"""Copy a file from src to dst."""
|
||||
with open(src, "r") as file:
|
||||
content = file.read()
|
||||
|
||||
# Interpolate the content
|
||||
content = content.replace("{{name}}", name)
|
||||
content = content.replace("{{crew_name}}", class_name)
|
||||
content = content.replace("{{folder_name}}", folder_name)
|
||||
|
||||
# Write the interpolated content to the new file
|
||||
with open(dst, "w") as file:
|
||||
file.write(content)
|
||||
|
||||
click.secho(f" - Created {dst}", fg="green")
|
||||
src/crewai/cli/templates/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
.env
__pycache__/
src/crewai/cli/templates/README.md (new file, 57 lines)
@@ -0,0 +1,57 @@
# {{crew_name}} Crew

Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.

## Installation

Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.

First, if you haven't already, install Poetry:

```bash
pip install poetry
```

Next, navigate to your project directory and install the dependencies:

1. First lock the dependencies and then install them:

```bash
poetry lock
```

```bash
poetry install
```

### Customizing

**Add your `OPENAI_API_KEY` to the `.env` file.**

- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks

## Running the Project

To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:

```bash
poetry run {{folder_name}}
```

This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.

Unmodified, this example will create a `report.md` file in the root folder with the output of research on LLMs.

## Understanding Your Crew

The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.

## Support

For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)

Let's create wonders together with the power and simplicity of crewAI.
src/crewai/cli/templates/__init__.py (new file, 0 lines)
src/crewai/cli/templates/config/agents.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
researcher:
  role: >
    {topic} Senior Data Researcher
  goal: >
    Uncover cutting-edge developments in {topic}
  backstory: >
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst
  goal: >
    Create detailed reports based on {topic} data analysis and research findings
  backstory: >
    You're a meticulous analyst with a keen eye for detail. You're known for
    your ability to turn complex data into clear and concise reports, making
    it easy for others to understand and act on the information you provide.
src/crewai/cli/templates/config/tasks.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
research_task:
  description: >
    Conduct thorough research about {topic}.
    Make sure you find any interesting and relevant information given
    the current year is 2024.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}

reporting_task:
  description: >
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
src/crewai/cli/templates/crew.py (new file, 55 lines)
@@ -0,0 +1,55 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from {{folder_name}}.tools.custom_tool import MyCustomTool

# Check our tools documentation for more information on how to use them
# from crewai_tools import SerperDevTool

@CrewBase
class {{crew_name}}Crew():
    """{{crew_name}} crew"""
    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config['researcher'],
            # tools=[MyCustomTool()],  # Example of custom tool, loaded at the beginning of the file
            verbose=True
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(
            config=self.tasks_config['research_task'],
            agent=self.researcher()
        )

    @task
    def reporting_task(self) -> Task:
        return Task(
            config=self.tasks_config['reporting_task'],
            agent=self.reporting_analyst(),
            output_file='report.md'
        )

    @crew
    def crew(self) -> Crew:
        """Creates the {{crew_name}} crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=2,
            # process=Process.hierarchical,  # In case you want to use that instead: https://docs.crewai.com/how-to/Hierarchical/
        )
src/crewai/cli/templates/main.py (new file, 13 lines)
@@ -0,0 +1,13 @@
#!/usr/bin/env python
from crew import {{crew_name}}Crew


def run():
    # Replace with your inputs; they will automatically interpolate any task and agent information
    inputs = {
        'topic': 'AI LLMs'
    }
    {{crew_name}}Crew().crew().kickoff(inputs=inputs)

if __name__ == "__main__":
    run()
src/crewai/cli/templates/pyproject.toml (new file, 16 lines)
@@ -0,0 +1,16 @@
[tool.poetry]
name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = {extras = ["tools"], version = "^0.22.2"}

[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:run"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
src/crewai/cli/templates/tools/__init__.py (new file, 0 lines)
src/crewai/cli/templates/tools/custom_tool.py (new file, 10 lines)
@@ -0,0 +1,10 @@
from crewai_tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "Clear description of what this tool is useful for; your agent will need this information to use it."

    def _run(self, argument: str) -> str:
        # Implementation goes here
        return "this is an example of a tool output, ignore it and move along."
@@ -2,6 +2,7 @@ import json
|
||||
import uuid
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from langchain_core.callbacks import BaseCallbackHandler
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
@@ -19,7 +20,7 @@ from crewai.agent import Agent
|
||||
from crewai.agents.cache import CacheHandler
|
||||
from crewai.process import Process
|
||||
from crewai.task import Task
|
||||
from crewai.telemtry import Telemetry
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai.tools.agent_tools import AgentTools
|
||||
from crewai.utilities import I18N, Logger, RPMController
|
||||
|
||||
@@ -32,15 +33,20 @@ class Crew(BaseModel):
|
||||
tasks: List of tasks assigned to the crew.
|
||||
agents: List of agents part of this crew.
|
||||
manager_llm: The language model that will run manager agent.
|
||||
manager_callbacks: The callback handlers to be executed by the manager agent when hierarchical process is used
|
||||
function_calling_llm: The language model that will run the tool calling for all the agents.
|
||||
process: The process flow that the crew will follow (e.g., sequential).
|
||||
verbose: Indicates the verbosity level for logging during execution.
|
||||
config: Configuration settings for the crew.
|
||||
_cache_handler: Handles caching for the crew's operations.
|
||||
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
|
||||
id: A unique identifier for the crew instance.
|
||||
full_output: Whether the crew should return the full output with all tasks outputs or just the final output.
|
||||
step_callback: Callback to be executed after each step of every agent's execution.
|
||||
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
|
||||
"""
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
_execution_span: Any = PrivateAttr()
|
||||
_rpm_controller: RPMController = PrivateAttr()
|
||||
_logger: Logger = PrivateAttr()
|
||||
_cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
|
||||
@@ -49,11 +55,31 @@ class Crew(BaseModel):
|
||||
agents: List[Agent] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
verbose: Union[int, bool] = Field(default=0)
|
||||
usage_metrics: Optional[dict] = Field(
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
full_output: Optional[bool] = Field(
|
||||
default=False,
|
||||
description="Whether the crew should return the full output with all tasks outputs or just the final output.",
|
||||
)
|
||||
manager_llm: Optional[Any] = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
|
||||
default=None,
|
||||
description="A list of callback handlers to be executed by the manager agent when hierarchical process is used",
|
||||
)
|
||||
function_calling_llm: Optional[Any] = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
share_crew: Optional[bool] = Field(default=False)
|
||||
step_callback: Optional[Any] = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each step for all agents execution.",
|
||||
)
|
||||
max_rpm: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the crew execution to be respected.",
|
||||
@@ -94,6 +120,7 @@ class Crew(BaseModel):
|
||||
self._logger = Logger(self.verbose)
|
||||
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
|
||||
self._telemetry = Telemetry()
|
||||
self._telemetry.set_tracer()
|
||||
self._telemetry.crew_creation(self)
|
||||
return self
|
||||
|
||||
@@ -137,6 +164,7 @@ class Crew(BaseModel):
|
||||
"missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {}
|
||||
)
|
||||
|
||||
self.process = self.config.get("process", self.process)
|
||||
self.agents = [Agent(**agent) for agent in self.config["agents"]]
|
||||
self.tasks = [self._create_task(task) for task in self.config["tasks"]]
|
||||
|
||||
@@ -155,45 +183,69 @@ class Crew(BaseModel):
|
||||
del task_config["agent"]
|
||||
return Task(**task_config, agent=task_agent)
|
||||
|
||||
def kickoff(self) -> str:
|
||||
def kickoff(self, inputs: Optional[Dict[str, Any]] = {}) -> str:
|
||||
"""Starts the crew to work on its assigned tasks."""
|
||||
self._execution_span = self._telemetry.crew_execution_span(self)
|
||||
self._interpolate_inputs(inputs)
|
||||
|
||||
for agent in self.agents:
|
||||
agent.i18n = I18N(language=self.language)
|
||||
|
||||
if self.process == Process.sequential:
|
||||
return self._run_sequential_process()
|
||||
if self.process == Process.hierarchical:
|
||||
return self._run_hierarchical_process()
|
||||
if not agent.function_calling_llm:
|
||||
agent.function_calling_llm = self.function_calling_llm
|
||||
agent.create_agent_executor()
|
||||
if not agent.step_callback:
|
||||
agent.step_callback = self.step_callback
|
||||
agent.create_agent_executor()
|
||||
|
||||
raise NotImplementedError(
|
||||
f"The process '{self.process}' is not implemented yet."
|
||||
)
|
||||
metrics = []
|
||||
|
||||
if self.process == Process.sequential:
|
||||
result = self._run_sequential_process()
|
||||
elif self.process == Process.hierarchical:
|
||||
result, manager_metrics = self._run_hierarchical_process()
|
||||
metrics.append(manager_metrics)
|
||||
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"The process '{self.process}' is not implemented yet."
|
||||
)
|
||||
|
||||
metrics = metrics + [
|
||||
agent._token_process.get_summary() for agent in self.agents
|
||||
]
|
||||
self.usage_metrics = {
|
||||
key: sum([m[key] for m in metrics if m is not None]) for key in metrics[0]
|
||||
}
|
||||
|
||||
return result
|
||||
|
||||
def _run_sequential_process(self) -> str:
|
||||
"""Executes tasks sequentially and returns the final output."""
|
||||
task_output = ""
|
||||
for task in self.tasks:
|
||||
if task.agent is not None and task.agent.allow_delegation:
|
||||
if task.agent.allow_delegation:
|
||||
agents_for_delegation = [
|
||||
agent for agent in self.agents if agent != task.agent
|
||||
]
|
||||
task.tools += AgentTools(agents=agents_for_delegation).tools()
|
||||
if len(self.agents) > 1 and len(agents_for_delegation) > 0:
|
||||
task.tools += AgentTools(agents=agents_for_delegation).tools()
|
||||
|
||||
role = task.agent.role if task.agent is not None else "None"
|
||||
self._logger.log("debug", f"Working Agent: {role}")
|
||||
self._logger.log("info", f"Starting Task: {task.description}")
|
||||
self._logger.log("debug", f"== Working Agent: {role}", color="bold_yellow")
|
||||
self._logger.log(
|
||||
"info", f"== Starting Task: {task.description}", color="bold_yellow"
|
||||
)
|
||||
|
||||
output = task.execute(context=task_output)
|
||||
if not task.async_execution:
|
||||
task_output = output
|
||||
|
||||
role = task.agent.role if task.agent is not None else "None"
|
||||
self._logger.log("debug", f"[{role}] Task output: {task_output}\n\n")
|
||||
self._logger.log("debug", f"== [{role}] Task output: {task_output}\n\n")
|
||||
|
||||
if self.max_rpm:
|
||||
self._rpm_controller.stop_rpm_counter()
|
||||
|
||||
return task_output
|
||||
self._finish_execution(task_output)
|
||||
return self._format_output(task_output)
|
||||
|
||||
def _run_hierarchical_process(self) -> str:
|
||||
"""Creates and assigns a manager agent to make sure the crew completes the tasks."""
|
||||
@@ -217,11 +269,30 @@ class Crew(BaseModel):
|
||||
agent=manager, context=task_output, tools=manager.tools
|
||||
)
|
||||
|
||||
self._logger.log(
|
||||
"debug", f"[{manager.role}] Task output: {task_output}\n\n"
|
||||
)
|
||||
self._logger.log("debug", f"[{manager.role}] Task output: {task_output}")
|
||||
|
||||
self._finish_execution(task_output)
|
||||
return self._format_output(task_output), manager._token_process.get_summary()
|
||||
|
||||
def _interpolate_inputs(self, inputs: Dict[str, Any]) -> str:
|
||||
"""Interpolates the inputs in the tasks and agents."""
|
||||
[task.interpolate_inputs(inputs) for task in self.tasks]
|
||||
[agent.interpolate_inputs(inputs) for agent in self.agents]
|
||||
|
||||
def _format_output(self, output: str) -> str:
|
||||
"""Formats the output of the crew execution."""
|
||||
if self.full_output:
|
||||
return {
|
||||
"final_output": output,
|
||||
"tasks_outputs": [task.output for task in self.tasks if task],
|
||||
}
|
||||
else:
|
||||
return output
|
||||
|
||||
def _finish_execution(self, output) -> None:
|
||||
if self.max_rpm:
|
||||
self._rpm_controller.stop_rpm_counter()
|
||||
self._telemetry.end_crew(self, output)
|
||||
|
||||
return task_output
|
||||
def __repr__(self):
|
||||
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
|
||||
|
||||
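To tie the `kickoff` changes together, here is a minimal sketch (all names illustrative) of how `{placeholder}` values flow through `_interpolate_inputs` into the tasks and agents before execution:

```python
from crewai import Agent, Crew, Task

researcher = Agent(
    role="{topic} Researcher",
    goal="Summarize the latest developments in {topic}",
    backstory="You research {topic} for a living.",
)
task = Task(
    description="Write three bullet points about {topic}",
    expected_output="Three bullet points",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])

# Every {topic} in the tasks and agents is interpolated, the sequential process
# runs, and per-agent token summaries are aggregated into crew.usage_metrics.
result = crew.kickoff(inputs={"topic": "AI LLMs"})
print(result)
print(crew.usage_metrics)
```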
src/crewai/project/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
from .annotations import agent, crew, task
from .crew_base import CrewBase
src/crewai/project/annotations.py (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
tasks_order = []
|
||||
|
||||
|
||||
def task(func):
|
||||
func.is_task = True
|
||||
tasks_order.append(func.__name__)
|
||||
return func
|
||||
|
||||
|
||||
def agent(func):
|
||||
func.is_agent = True
|
||||
return func
|
||||
|
||||
|
||||
def crew(func):
|
||||
def wrapper(self, *args, **kwargs):
|
||||
instantiated_tasks = []
|
||||
instantiated_agents = []
|
||||
|
||||
agent_roles = set()
|
||||
# Iterate over tasks_order to maintain the defined order
|
||||
for task_name in tasks_order:
|
||||
possible_task = getattr(self, task_name)
|
||||
if callable(possible_task):
|
||||
task_instance = possible_task()
|
||||
instantiated_tasks.append(task_instance)
|
||||
if hasattr(task_instance, "agent"):
|
||||
agent_instance = task_instance.agent
|
||||
if agent_instance.role not in agent_roles:
|
||||
instantiated_agents.append(agent_instance)
|
||||
agent_roles.add(agent_instance.role)
|
||||
|
||||
# Instantiate any additional agents not already included by tasks
|
||||
for attr_name in dir(self):
|
||||
possible_agent = getattr(self, attr_name)
|
||||
if callable(possible_agent) and hasattr(possible_agent, "is_agent"):
|
||||
temp_agent_instance = possible_agent()
|
||||
if temp_agent_instance.role not in agent_roles:
|
||||
instantiated_agents.append(temp_agent_instance)
|
||||
agent_roles.add(temp_agent_instance.role)
|
||||
|
||||
self.agents = instantiated_agents
|
||||
self.tasks = instantiated_tasks
|
||||
|
||||
return func(self, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
src/crewai/project/crew_base.py (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
import inspect
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import yaml
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def CrewBase(cls):
|
||||
class WrappedClass(cls):
|
||||
is_crew_class = True
|
||||
|
||||
base_directory = None
|
||||
for frame_info in inspect.stack():
|
||||
if "site-packages" not in frame_info.filename:
|
||||
base_directory = Path(frame_info.filename).parent.resolve()
|
||||
break
|
||||
|
||||
if base_directory is None:
|
||||
raise Exception(
|
||||
"Unable to dynamically determine the project's base directory, you must run it from the project's root directory."
|
||||
)
|
||||
|
||||
original_agents_config_path = getattr(
|
||||
cls, "agents_config", "config/agents.yaml"
|
||||
)
|
||||
original_tasks_config_path = getattr(cls, "tasks_config", "config/tasks.yaml")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.agents_config = self.load_yaml(
|
||||
os.path.join(self.base_directory, self.original_agents_config_path)
|
||||
)
|
||||
self.tasks_config = self.load_yaml(
|
||||
os.path.join(self.base_directory, self.original_tasks_config_path)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def load_yaml(config_path: str):
|
||||
with open(config_path, "r") as file:
|
||||
return yaml.safe_load(file)
|
||||
|
||||
return WrappedClass
|
||||
@@ -1,13 +1,15 @@
|
||||
import threading
|
||||
import uuid
|
||||
from typing import Any, List, Optional
|
||||
from typing import Any, Dict, List, Optional, Type
|
||||
|
||||
from langchain_openai import ChatOpenAI
|
||||
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
|
||||
from pydantic_core import PydanticCustomError
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.utilities import I18N
|
||||
from crewai.utilities import I18N, Converter, ConverterError, Printer
|
||||
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
|
||||
|
||||
|
||||
class Task(BaseModel):
|
||||
@@ -17,19 +19,25 @@ class Task(BaseModel):
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
used_tools: int = 0
|
||||
tools_errors: int = 0
|
||||
delegations: int = 0
|
||||
i18n: I18N = I18N()
|
||||
thread: threading.Thread = None
|
||||
description: str = Field(description="Description of the actual task.")
|
||||
expected_output: str = Field(
|
||||
description="Clear definition of expected output for the task."
|
||||
)
|
||||
config: Optional[Dict[str, Any]] = Field(
|
||||
description="Configuration for the agent",
|
||||
default=None,
|
||||
)
|
||||
callback: Optional[Any] = Field(
|
||||
description="Callback to be executed after the task is completed.", default=None
|
||||
)
|
||||
agent: Optional[Agent] = Field(
|
||||
description="Agent responsible for execution the task.", default=None
|
||||
)
|
||||
expected_output: Optional[str] = Field(
|
||||
description="Clear definition of expected output for the task.",
|
||||
default=None,
|
||||
)
|
||||
context: Optional[List["Task"]] = Field(
|
||||
description="Other tasks that will have their output used as context for this task.",
|
||||
default=None,
|
||||
@@ -38,10 +46,22 @@ class Task(BaseModel):
|
||||
description="Whether the task should be executed asynchronously or not.",
|
||||
default=False,
|
||||
)
|
||||
output_json: Optional[Type[BaseModel]] = Field(
|
||||
description="A Pydantic model to be used to create a JSON output.",
|
||||
default=None,
|
||||
)
|
||||
output_pydantic: Optional[Type[BaseModel]] = Field(
|
||||
description="A Pydantic model to be used to create a Pydantic output.",
|
||||
default=None,
|
||||
)
|
||||
output_file: Optional[str] = Field(
|
||||
description="A file path to be used to create a file output.",
|
||||
default=None,
|
||||
)
|
||||
output: Optional[TaskOutput] = Field(
|
||||
description="Task output, it's final result after being executed", default=None
|
||||
)
|
||||
tools: List[Any] = Field(
|
||||
tools: Optional[List[Any]] = Field(
|
||||
default_factory=list,
|
||||
description="Tools the agent is limited to use for this task.",
|
||||
)
|
||||
@@ -51,6 +71,10 @@ class Task(BaseModel):
|
||||
description="Unique identifier for the object, not set by user.",
|
||||
)
|
||||
|
||||
def __init__(__pydantic_self__, **data):
|
||||
config = data.pop("config", {})
|
||||
super().__init__(**config, **data)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
||||
@@ -59,6 +83,14 @@ class Task(BaseModel):
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_attributes_based_on_config(self) -> "Task":
|
||||
"""Set attributes based on the agent configuration."""
|
||||
if self.config:
|
||||
for key, value in self.config.items():
|
||||
setattr(self, key, value)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def check_tools(self):
|
||||
"""Check if the tools are set."""
|
||||
@@ -66,6 +98,18 @@ class Task(BaseModel):
|
||||
self.tools.extend(self.agent.tools)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def check_output(self):
|
||||
"""Check if an output type is set."""
|
||||
output_types = [self.output_json, self.output_pydantic]
|
||||
if len([type for type in output_types if type]) > 1:
|
||||
raise PydanticCustomError(
|
||||
"output_type",
|
||||
"Only one output type can be set, either output_pydantic or output_json.",
|
||||
{},
|
||||
)
|
||||
return self
|
||||
|
||||
def execute(
|
||||
self,
|
||||
agent: Agent | None = None,
|
||||
@@ -89,32 +133,47 @@ class Task(BaseModel):
|
||||
for task in self.context:
|
||||
if task.async_execution:
|
||||
task.thread.join()
|
||||
context.append(task.output.result)
|
||||
if task and task.output:
|
||||
context.append(task.output.raw_output)
|
||||
context = "\n".join(context)
|
||||
|
||||
tools = tools or self.tools
|
||||
|
||||
if self.async_execution:
|
||||
self.thread = threading.Thread(
|
||||
target=self._execute, args=(agent, self._prompt(), context, tools)
|
||||
target=self._execute, args=(agent, self, context, tools)
|
||||
)
|
||||
self.thread.start()
|
||||
else:
|
||||
result = self._execute(
|
||||
task=self,
|
||||
agent=agent,
|
||||
task_prompt=self._prompt(),
|
||||
context=context,
|
||||
tools=tools,
|
||||
)
|
||||
return result
|
||||
|
||||
def _execute(self, agent, task_prompt, context, tools):
|
||||
result = agent.execute_task(task=task_prompt, context=context, tools=tools)
|
||||
self.output = TaskOutput(description=self.description, result=result)
|
||||
self.callback(self.output) if self.callback else None
|
||||
return result
|
||||
def _execute(self, agent, task, context, tools):
|
||||
result = agent.execute_task(
|
||||
task=task,
|
||||
context=context,
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
def _prompt(self) -> str:
|
||||
exported_output = self._export_output(result)
|
||||
|
||||
self.output = TaskOutput(
|
||||
description=self.description,
|
||||
exported_output=exported_output,
|
||||
raw_output=result,
|
||||
)
|
||||
|
||||
if self.callback:
|
||||
self.callback(self.output)
|
||||
|
||||
return exported_output
|
||||
|
||||
def prompt(self) -> str:
|
||||
"""Prompt the task.
|
||||
|
||||
Returns:
|
||||
@@ -122,9 +181,69 @@ class Task(BaseModel):
|
||||
"""
|
||||
tasks_slices = [self.description]
|
||||
|
||||
if self.expected_output:
|
||||
output = self.i18n.slice("expected_output").format(
|
||||
expected_output=self.expected_output
|
||||
)
|
||||
tasks_slices = [self.description, output]
|
||||
output = self.i18n.slice("expected_output").format(
|
||||
expected_output=self.expected_output
|
||||
)
|
||||
tasks_slices = [self.description, output]
|
||||
return "\n".join(tasks_slices)
|
||||
|
||||
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
||||
"""Interpolate inputs into the task description and expected output."""
|
||||
if inputs:
|
||||
self.description = self.description.format(**inputs)
|
||||
self.expected_output = self.expected_output.format(**inputs)
|
||||
|
||||
def increment_tools_errors(self) -> None:
|
||||
"""Increment the tools errors counter."""
|
||||
self.tools_errors += 1
|
||||
|
||||
def increment_delegations(self) -> None:
|
||||
"""Increment the delegations counter."""
|
||||
self.delegations += 1
|
||||
|
||||
def _export_output(self, result: str) -> Any:
|
||||
exported_result = result
|
||||
instructions = "I'm gonna convert this raw text into valid JSON."
|
||||
|
||||
if self.output_pydantic or self.output_json:
|
||||
model = self.output_pydantic or self.output_json
|
||||
llm = self.agent.function_calling_llm or self.agent.llm
|
||||
|
||||
if not self._is_gpt(llm):
|
||||
model_schema = PydanticSchemaParser(model=model).get_schema()
|
||||
instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
|
||||
|
||||
converter = Converter(
|
||||
llm=llm, text=result, model=model, instructions=instructions
|
||||
)
|
||||
|
||||
if self.output_pydantic:
|
||||
exported_result = converter.to_pydantic()
|
||||
elif self.output_json:
|
||||
exported_result = converter.to_json()
|
||||
|
||||
if isinstance(exported_result, ConverterError):
|
||||
Printer().print(
|
||||
content=f"{exported_result.message} Using raw output instead.",
|
||||
color="red",
|
||||
)
|
||||
exported_result = result
|
||||
|
||||
if self.output_file:
|
||||
content = (
|
||||
exported_result if not self.output_pydantic else exported_result.json()
|
||||
)
|
||||
self._save_file(content)
|
||||
|
||||
return exported_result
|
||||
|
||||
def _is_gpt(self, llm) -> bool:
|
||||
return isinstance(llm, ChatOpenAI) and llm.openai_api_base == None
|
||||
|
||||
def _save_file(self, result: Any) -> None:
|
||||
with open(self.output_file, "w") as file:
|
||||
file.write(result)
|
||||
return None
|
||||
|
||||
def __repr__(self):
|
||||
return f"Task(description={self.description}, expected_output={self.expected_output})"
|
||||
|
||||
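A sketch of the new structured-output options (the `Report` model and its fields are made up); the `check_output` validator above rejects setting both `output_pydantic` and `output_json` at once:

```python
from pydantic import BaseModel

from crewai import Agent, Task


class Report(BaseModel):
    title: str
    bullet_points: list


analyst = Agent(role="Analyst", goal="Turn research into reports", backstory="...")
report_task = Task(
    description="Summarize the research into a structured report",
    expected_output="A structured report",
    agent=analyst,
    output_pydantic=Report,     # raw LLM text is converted by Converter in _export_output()
    output_file="report.json",  # optional: the exported output is also written to disk
)
```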
@@ -1,4 +1,4 @@
from typing import Optional
from typing import Optional, Union

from pydantic import BaseModel, Field, model_validator

@@ -8,10 +8,16 @@ class TaskOutput(BaseModel):

    description: str = Field(description="Description of the task")
    summary: Optional[str] = Field(description="Summary of the task", default=None)
    result: str = Field(description="Result of the task")
    exported_output: Union[str, BaseModel] = Field(
        description="Output of the task", default=None
    )
    raw_output: str = Field(description="Result of the task")

    @model_validator(mode="after")
    def set_summary(self):
        excerpt = " ".join(self.description.split(" ")[:10])
        self.summary = f"{excerpt}..."
        return self

    def result(self):
        return self.exported_output
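The reshaped `TaskOutput` can be exercised on its own; `set_summary` trims the description to its first ten words, and `result()` now aliases `exported_output`:

```python
from crewai.tasks.task_output import TaskOutput  # import path per the task.py diff above

out = TaskOutput(
    description="Summarize the research into three bullet points for the team",
    exported_output="- point one\n- point two\n- point three",
    raw_output="- point one\n- point two\n- point three",
)
print(out.summary)   # first ten words of the description, plus "..."
print(out.result())  # returns exported_output
```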
src/crewai/telemetry/telemetry.py (new file, 262 lines)
@@ -0,0 +1,262 @@
|
||||
import json
|
||||
import os
|
||||
import platform
|
||||
from typing import Any
|
||||
|
||||
import pkg_resources
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
|
||||
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
||||
from opentelemetry.trace import Status, StatusCode
|
||||
|
||||
|
||||
class Telemetry:
|
||||
"""A class to handle anonymous telemetry for the crewai package.
|
||||
|
||||
The data being collected is for development purpose, all data is anonymous.
|
||||
|
||||
There is NO data being collected on the prompts, tasks descriptions
|
||||
agents backstories or goals nor responses or any data that is being
|
||||
processed by the agents, nor any secrets and env vars.
|
||||
|
||||
Data collected includes:
|
||||
- Version of crewAI
|
||||
- Version of Python
|
||||
- General OS (e.g. number of CPUs, macOS/Windows/Linux)
|
||||
- Number of agents and tasks in a crew
|
||||
- Crew Process being used
|
||||
- If Agents are using memory or allowing delegation
|
||||
- If Tasks are being executed in parallel or sequentially
|
||||
- Language model being used
|
||||
- Roles of agents in a crew
|
||||
- Tools names available
|
||||
|
||||
Users can opt-in to sharing more complete data suing the `share_crew`
|
||||
attribute in the Crew class.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.ready = False
|
||||
try:
|
||||
telemetry_endpoint = "http://telemetry.crewai.com:4318"
|
||||
self.resource = Resource(
|
||||
attributes={SERVICE_NAME: "crewAI-telemetry"},
|
||||
)
|
||||
self.provider = TracerProvider(resource=self.resource)
|
||||
processor = BatchSpanProcessor(
|
||||
OTLPSpanExporter(endpoint=f"{telemetry_endpoint}/v1/traces", timeout=15)
|
||||
)
|
||||
self.provider.add_span_processor(processor)
|
||||
self.ready = True
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def set_tracer(self):
|
||||
if self.ready:
|
||||
try:
|
||||
trace.set_tracer_provider(self.provider)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def crew_creation(self, crew):
|
||||
"""Records the creation of a crew."""
|
||||
if self.ready:
|
||||
try:
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Crew Created")
|
||||
self._add_attribute(
|
||||
span,
|
||||
"crewai_version",
|
||||
pkg_resources.get_distribution("crewai").version,
|
||||
)
|
||||
self._add_attribute(span, "python_version", platform.python_version())
|
||||
self._add_attribute(span, "crew_id", str(crew.id))
|
||||
self._add_attribute(span, "crew_process", crew.process)
|
||||
self._add_attribute(span, "crew_language", crew.language)
|
||||
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
|
||||
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
|
||||
self._add_attribute(
|
||||
span,
|
||||
"crew_agents",
|
||||
json.dumps(
|
||||
[
|
||||
{
|
||||
"id": str(agent.id),
|
||||
"role": agent.role,
|
||||
"memory_enabled?": agent.memory,
|
||||
"verbose?": agent.verbose,
|
||||
"max_iter": agent.max_iter,
|
||||
"max_rpm": agent.max_rpm,
|
||||
"i18n": agent.i18n.language,
|
||||
"llm": json.dumps(self._safe_llm_attributes(agent.llm)),
|
||||
"delegation_enabled?": agent.allow_delegation,
|
||||
"tools_names": [tool.name for tool in agent.tools],
|
||||
}
|
||||
for agent in crew.agents
|
||||
]
|
||||
),
|
||||
)
|
||||
self._add_attribute(
|
||||
span,
|
||||
"crew_tasks",
|
||||
json.dumps(
|
||||
[
|
||||
{
|
||||
"id": str(task.id),
|
||||
"async_execution?": task.async_execution,
|
||||
"agent_role": task.agent.role if task.agent else "None",
|
||||
"tools_names": [tool.name for tool in task.tools],
|
||||
}
|
||||
for task in crew.tasks
|
||||
]
|
||||
),
|
||||
)
|
||||
self._add_attribute(span, "platform", platform.platform())
|
||||
self._add_attribute(span, "platform_release", platform.release())
|
||||
self._add_attribute(span, "platform_system", platform.system())
|
||||
self._add_attribute(span, "platform_version", platform.version())
|
||||
self._add_attribute(span, "cpus", os.cpu_count())
|
||||
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the repeated usage 'error' of a tool by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Tool Repeated Usage")
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                self._add_attribute(
                    span, "llm", json.dumps(self._safe_llm_attributes(llm))
                )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the usage of a tool by an agent."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Tool Usage")
                self._add_attribute(span, "tool_name", tool_name)
                self._add_attribute(span, "attempts", attempts)
                self._add_attribute(
                    span, "llm", json.dumps(self._safe_llm_attributes(llm))
                )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def tool_usage_error(self, llm: Any):
        """Records an error that occurred while an agent was using a tool."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Tool Usage Error")
                self._add_attribute(
                    span, "llm", json.dumps(self._safe_llm_attributes(llm))
                )
                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

    def crew_execution_span(self, crew):
        """Records the complete execution of a crew.

        This is only collected if the user has opted-in to share the crew.
        """
        if (self.ready) and (crew.share_crew):
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Crew Execution")
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(
                    span,
                    "crew_agents",
                    json.dumps(
                        [
                            {
                                "id": str(agent.id),
                                "role": agent.role,
                                "goal": agent.goal,
                                "backstory": agent.backstory,
                                "memory_enabled?": agent.memory,
                                "verbose?": agent.verbose,
                                "max_iter": agent.max_iter,
                                "max_rpm": agent.max_rpm,
                                "i18n": agent.i18n.language,
                                "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                                "delegation_enabled?": agent.allow_delegation,
                                "tools_names": [tool.name for tool in agent.tools],
                            }
                            for agent in crew.agents
                        ]
                    ),
                )
                self._add_attribute(
                    span,
                    "crew_tasks",
                    json.dumps(
                        [
                            {
                                "id": str(task.id),
                                "description": task.description,
                                "async_execution?": task.async_execution,
                                "output": task.expected_output,
                                "agent_role": task.agent.role if task.agent else "None",
                                "context": [task.description for task in task.context]
                                if task.context
                                else "None",
                                "tools_names": [tool.name for tool in task.tools],
                            }
                            for task in crew.tasks
                        ]
                    ),
                )
                return span
            except Exception:
                pass

    def end_crew(self, crew, output):
        """Closes the crew execution span with the final and per-task outputs."""
        if (self.ready) and (crew.share_crew):
            try:
                self._add_attribute(crew._execution_span, "crew_output", output)
                self._add_attribute(
                    crew._execution_span,
                    "crew_tasks_output",
                    json.dumps(
                        [
                            {
                                "id": str(task.id),
                                "description": task.description,
                                "output": task.output.raw_output,
                            }
                            for task in crew.tasks
                        ]
                    ),
                )
                crew._execution_span.set_status(Status(StatusCode.OK))
                crew._execution_span.end()
            except Exception:
                pass

    def _add_attribute(self, span, key, value):
        """Add an attribute to a span."""
        try:
            return span.set_attribute(key, value)
        except Exception:
            pass

    def _safe_llm_attributes(self, llm):
        """Expose only non-sensitive LLM attributes (never keys, prompts, or env vars)."""
        attributes = ["name", "model_name", "base_url", "model", "top_k", "temperature"]
        safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
        safe_attributes["class"] = llm.__class__.__name__
        return safe_attributes
@@ -1,114 +0,0 @@
import json
import os
import platform
import socket

import pkg_resources
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Status, StatusCode


class Telemetry:
    """A class to handle anonymous telemetry for the crewai package.

    The data being collected is for development purpose, all data is anonymous.

    There is NO data being collected on the prompts, tasks descriptions
    agents backstories or goals nor responses or any data that is being
    processed by the agents, nor any secrets and env vars.

    Data collected includes:
    - Version of crewAI
    - Version of Python
    - General OS (e.g. number of CPUs, macOS/Windows/Linux)
    - Number of agents and tasks in a crew
    - Crew Process being used
    - If Agents are using memory or allowing delegation
    - If Tasks are being executed in parallel or sequentially
    - Language model being used
    - Roles of agents in a crew
    - Tools names available
    """

    def __init__(self):
        telemetry_endpoint = "http://telemetry.crewai.com:4318"
        self.resource = Resource(attributes={SERVICE_NAME: "crewAI-telemetry"})
        provider = TracerProvider(resource=self.resource)
        processor = BatchSpanProcessor(
            OTLPSpanExporter(endpoint=f"{telemetry_endpoint}/v1/traces")
        )
        provider.add_span_processor(processor)
        trace.set_tracer_provider(provider)

    def crew_creation(self, crew):
        """Records the creation of a crew."""
        try:
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Crew Created")
            self.add_attribute(
                span, "crewai_version", pkg_resources.get_distribution("crewai").version
            )
            self.add_attribute(span, "python_version", platform.python_version())
            self.add_attribute(span, "hostname", socket.gethostname())
            self.add_attribute(span, "crewid", str(crew.id))
            self.add_attribute(span, "crew_process", crew.process)
            self.add_attribute(span, "crew_language", crew.language)
            self.add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
            self.add_attribute(span, "crew_number_of_agents", len(crew.agents))
            self.add_attribute(
                span,
                "crew_agents",
                json.dumps(
                    [
                        {
                            "id": str(agent.id),
                            "role": agent.role,
                            "memory_enabled?": agent.memory,
                            "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                            "delegation_enabled?": agent.allow_delegation,
                            "tools_names": [tool.name for tool in agent.tools],
                        }
                        for agent in crew.agents
                    ]
                ),
            )
            self.add_attribute(
                span,
                "crew_tasks",
                json.dumps(
                    [
                        {
                            "id": str(task.id),
                            "async_execution?": task.async_execution,
                            "tools_names": [tool.name for tool in task.tools],
                        }
                        for task in crew.tasks
                    ]
                ),
            )
            self.add_attribute(span, "platform", platform.platform())
            self.add_attribute(span, "platform_release", platform.release())
            self.add_attribute(span, "platform_system", platform.system())
            self.add_attribute(span, "platform_version", platform.version())
            self.add_attribute(span, "cpus", os.cpu_count())
            span.set_status(Status(StatusCode.OK))
            span.end()
        except Exception:
            pass

    def add_attribute(self, span, key, value):
        """Add an attribute to a span."""
        try:
            return span.set_attribute(key, value)
        except Exception:
            pass

    def _safe_llm_attributes(self, llm):
        attributes = ["name", "model_name", "base_url", "model", "top_k", "temperature"]
        safe_attributes = {k: v for k, v in vars(llm).items() if k in attributes}
        safe_attributes["class"] = llm.__class__.__name__
        return safe_attributes
@@ -1,9 +1,10 @@
 from typing import List

-from langchain.tools import Tool
+from langchain.tools import StructuredTool
 from pydantic import BaseModel, Field

 from crewai.agent import Agent
 from crewai.task import Task
 from crewai.utilities import I18N


@@ -15,50 +16,52 @@ class AgentTools(BaseModel):

     def tools(self):
         return [
-            Tool.from_function(
+            StructuredTool.from_function(
                 func=self.delegate_work,
                 name="Delegate work to co-worker",
                 description=self.i18n.tools("delegate_work").format(
-                    coworkers=", ".join([agent.role for agent in self.agents])
+                    coworkers=[f"{agent.role}" for agent in self.agents]
                 ),
             ),
-            Tool.from_function(
+            StructuredTool.from_function(
                 func=self.ask_question,
                 name="Ask question to co-worker",
                 description=self.i18n.tools("ask_question").format(
-                    coworkers=", ".join([agent.role for agent in self.agents])
+                    coworkers=[f"{agent.role}" for agent in self.agents]
                 ),
             ),
         ]

-    def delegate_work(self, command):
-        """Useful to delegate a specific task to a coworker."""
-        return self._execute(command)
+    def delegate_work(self, coworker: str, task: str, context: str):
+        """Useful to delegate a specific task to a coworker passing all necessary context and names."""
+        return self._execute(coworker, task, context)

-    def ask_question(self, command):
-        """Useful to ask a question, opinion or take from a coworker."""
-        return self._execute(command)
+    def ask_question(self, coworker: str, question: str, context: str):
+        """Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
+        return self._execute(coworker, question, context)

-    def _execute(self, command):
+    def _execute(self, agent, task, context):
         """Execute the command."""
         try:
-            agent, task, context = command.split("|")
-        except ValueError:
-            return self.i18n.errors("agent_tool_missing_param")
-
-        if not agent or not task or not context:
-            return self.i18n.errors("agent_tool_missing_param")
-
-        agent = [
-            available_agent
-            for available_agent in self.agents
-            if available_agent.role == agent
-        ]
+            agent = [
+                available_agent
+                for available_agent in self.agents
+                if available_agent.role.strip().lower() == agent.strip().lower()
+            ]
+        except:
+            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
+                coworkers="\n".join([f"- {agent.role}" for agent in self.agents])
+            )

         if not agent:
             return self.i18n.errors("agent_tool_unexsiting_coworker").format(
-                coworkers=", ".join([agent.role for agent in self.agents])
+                coworkers="\n".join([f"- {agent.role}" for agent in self.agents])
             )

         agent = agent[0]
         task = Task(
             description=task,
             agent=agent,
             expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
         )
         return agent.execute_task(task, context)
@@ -1,4 +1,4 @@
-from langchain.tools import Tool
+from langchain.tools import StructuredTool
 from pydantic import BaseModel, ConfigDict, Field

 from crewai.agents.cache import CacheHandler
@@ -15,7 +15,7 @@ class CacheTools(BaseModel):
     )

     def tool(self):
-        return Tool.from_function(
+        return StructuredTool.from_function(
             func=self.hit_cache,
             name=self.name,
             description="Reads directly from the cache",
src/crewai/tools/tool_calling.py (new file, 21 lines)
@@ -0,0 +1,21 @@
from typing import Any, Dict, Optional

from pydantic import BaseModel as PydanticBaseModel
from pydantic import Field as PydanticField
from pydantic.v1 import BaseModel, Field


class ToolCalling(BaseModel):
    tool_name: str = Field(..., description="The name of the tool to be called.")
    arguments: Optional[Dict[str, Any]] = Field(
        ..., description="A dictionary of arguments to be passed to the tool."
    )


class InstructorToolCalling(PydanticBaseModel):
    tool_name: str = PydanticField(
        ..., description="The name of the tool to be called."
    )
    arguments: Optional[Dict[str, Any]] = PydanticField(
        ..., description="A dictionary of arguments to be passed to the tool."
    )
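As a quick illustration (not part of the diff), this is how the v1 model above validates a parsed tool call; the tool name and arguments are hypothetical:

# Illustrative sketch only; tool name and arguments are hypothetical.
from crewai.tools.tool_calling import ToolCalling

calling = ToolCalling(
    tool_name="multiplier",
    arguments={"first_number": 3, "second_number": 4},
)
print(calling.tool_name)  # multiplier
print(calling.arguments)  # {'first_number': 3, 'second_number': 4}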
src/crewai/tools/tool_output_parser.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import json
from typing import Any, List

import regex
from langchain.output_parsers import PydanticOutputParser
from langchain_core.exceptions import OutputParserException
from langchain_core.outputs import Generation
from langchain_core.pydantic_v1 import ValidationError


class ToolOutputParser(PydanticOutputParser):
    """Parses the function calling of a tool usage and its arguments."""

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        result[0].text = self._transform_in_valid_json(result[0].text)
        json_object = super().parse_result(result)
        try:
            return self.pydantic_object.parse_obj(json_object)
        except ValidationError as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
            raise OutputParserException(msg, llm_output=json_object)

    def _transform_in_valid_json(self, text) -> str:
        text = text.replace("```", "").replace("json", "")
        json_pattern = r"\{(?:[^{}]|(?R))*\}"
        matches = regex.finditer(json_pattern, text)

        for match in matches:
            try:
                # Attempt to parse the matched string as JSON
                json_obj = json.loads(match.group())
                # Return the first successfully parsed JSON object
                json_obj = json.dumps(json_obj)
                return str(json_obj)
            except json.JSONDecodeError:
                # If parsing fails, skip to the next match
                continue
        return text
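A note on the pattern above: `(?R)` is a recursive regex that matches balanced-brace spans and is only supported by the third-party `regex` package, not the stdlib `re`. A minimal sketch of the same extraction, with a made-up LLM reply:

# Illustrative sketch of the recursive-brace extraction; the input text is made up.
import json

import regex

text = 'Sure, calling it now: {"tool_name": "search", "arguments": {"q": "crewai"}} done.'
for match in regex.finditer(r"\{(?:[^{}]|(?R))*\}", text):
    try:
        print(json.loads(match.group()))  # first parseable object wins
        break
    except json.JSONDecodeError:
        continue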
src/crewai/tools/tool_usage.py (new file, 287 lines)
@@ -0,0 +1,287 @@
import ast
from textwrap import dedent
from typing import Any, List, Union

from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI

from crewai.agents.tools_handler import ToolsHandler
from crewai.telemetry import Telemetry
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.utilities import I18N, Converter, ConverterError, Printer

OPENAI_BIGGER_MODELS = ["gpt-4"]


class ToolUsageErrorException(Exception):
    """Exception raised for errors in the tool usage."""

    def __init__(self, message: str) -> None:
        self.message = message
        super().__init__(self.message)


class ToolUsage:
    """
    Class that represents the usage of a tool by an agent.

    Attributes:
        task: Task being executed.
        tools_handler: Tools handler that will manage the tool usage.
        tools: List of tools available for the agent.
        tools_description: Description of the tools available for the agent.
        tools_names: Names of the tools available for the agent.
        function_calling_llm: Language model to be used for the tool usage.
    """

    def __init__(
        self,
        tools_handler: ToolsHandler,
        tools: List[BaseTool],
        tools_description: str,
        tools_names: str,
        task: Any,
        function_calling_llm: Any,
        action: Any,
    ) -> None:
        self._i18n: I18N = I18N()
        self._printer: Printer = Printer()
        self._telemetry: Telemetry = Telemetry()
        self._run_attempts: int = 1
        self._max_parsing_attempts: int = 3
        self._remember_format_after_usages: int = 3
        self.tools_description = tools_description
        self.tools_names = tools_names
        self.tools_handler = tools_handler
        self.tools = tools
        self.task = task
        self.action = action
        self.function_calling_llm = function_calling_llm

        # Set the maximum parsing attempts for bigger models
        if (isinstance(self.function_calling_llm, ChatOpenAI)) and (
            self.function_calling_llm.openai_api_base is None
        ):
            if self.function_calling_llm.model_name in OPENAI_BIGGER_MODELS:
                self._max_parsing_attempts = 2
                self._remember_format_after_usages = 4

    def parse(self, tool_string: str):
        """Parse the tool string and return the tool calling."""
        return self._tool_calling(tool_string)

    def use(
        self, calling: Union[ToolCalling, InstructorToolCalling], tool_string: str
    ) -> str:
        if isinstance(calling, ToolUsageErrorException):
            error = calling.message
            self._printer.print(content=f"\n\n{error}\n", color="red")
            self.task.increment_tools_errors()
            return error
        try:
            tool = self._select_tool(calling.tool_name)
        except Exception as e:
            error = getattr(e, "message", str(e))
            self.task.increment_tools_errors()
            self._printer.print(content=f"\n\n{error}\n", color="red")
            return error
        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"

    def _use(
        self,
        tool_string: str,
        tool: BaseTool,
        calling: Union[ToolCalling, InstructorToolCalling],
    ) -> str:
        if self._check_tool_repeated_usage(calling=calling):
            try:
                result = self._i18n.errors("task_repeated_usage").format(
                    tool_names=self.tools_names
                )
                self._printer.print(content=f"\n\n{result}\n", color="yellow")
                self._telemetry.tool_repeated_usage(
                    llm=self.function_calling_llm,
                    tool_name=tool.name,
                    attempts=self._run_attempts,
                )
                result = self._format_result(result=result)
                return result
            except Exception:
                self.task.increment_tools_errors()

        result = self.tools_handler.cache.read(
            tool=calling.tool_name, input=calling.arguments
        )

        if not result:
            try:
                if calling.tool_name in [
                    "Delegate work to co-worker",
                    "Ask question to co-worker",
                ]:
                    self.task.increment_delegations()

                if calling.arguments:
                    try:
                        # Only pass arguments the tool's schema actually accepts
                        acceptable_args = tool.args_schema.schema()["properties"].keys()
                        arguments = {
                            k: v
                            for k, v in calling.arguments.items()
                            if k in acceptable_args
                        }
                        result = tool._run(**arguments)
                    except Exception:
                        if tool.args_schema:
                            arguments = calling.arguments
                            result = tool._run(**arguments)
                        else:
                            arguments = calling.arguments.values()
                            result = tool._run(*arguments)
                else:
                    result = tool._run()
            except Exception as e:
                self._run_attempts += 1
                if self._run_attempts > self._max_parsing_attempts:
                    self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                    error_message = self._i18n.errors("tool_usage_exception").format(
                        error=e, tool=tool.name, tool_inputs=tool.description
                    )
                    error = ToolUsageErrorException(
                        f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
                    ).message
                    self.task.increment_tools_errors()
                    self._printer.print(content=f"\n\n{error_message}\n", color="red")
                    return error
                self.task.increment_tools_errors()
                return self.use(calling=calling, tool_string=tool_string)

            self.tools_handler.on_tool_use(calling=calling, output=result)

        self._printer.print(content=f"\n\n{result}\n", color="yellow")
        self._telemetry.tool_usage(
            llm=self.function_calling_llm,
            tool_name=tool.name,
            attempts=self._run_attempts,
        )
        result = self._format_result(result=result)
        return result

    def _format_result(self, result: Any) -> Any:
        self.task.used_tools += 1
        if self._should_remember_format():
            result = self._remember_format(result=result)
        return result

    def _should_remember_format(self) -> bool:
        return self.task.used_tools % self._remember_format_after_usages == 0

    def _remember_format(self, result: str) -> str:
        result = str(result)
        result += "\n\n" + self._i18n.slice("tools").format(
            tools=self.tools_description, tool_names=self.tools_names
        )
        return result

    def _check_tool_repeated_usage(
        self, calling: Union[ToolCalling, InstructorToolCalling]
    ) -> bool:
        if last_tool_usage := self.tools_handler.last_used_tool:
            return (calling.tool_name == last_tool_usage.tool_name) and (
                calling.arguments == last_tool_usage.arguments
            )

    def _select_tool(self, tool_name: str) -> BaseTool:
        for tool in self.tools:
            if tool.name.lower().strip() == tool_name.lower().strip():
                return tool
        self.task.increment_tools_errors()
        if tool_name and tool_name != "":
            raise Exception(
                f"Action '{tool_name}' doesn't exist, these are the only available Actions: {self.tools_description}"
            )
        else:
            raise Exception(
                f"I forgot the Action name, these are the only available Actions: {self.tools_description}"
            )

    def _render(self) -> str:
        """Render the tool name and description in plain text."""
        descriptions = []
        for tool in self.tools:
            args = {
                k: {k2: v2 for k2, v2 in v.items() if k2 in ["description", "type"]}
                for k, v in tool.args.items()
            }
            descriptions.append(
                "\n".join(
                    [
                        f"Tool Name: {tool.name.lower()}",
                        f"Tool Description: {tool.description}",
                        f"Tool Arguments: {args}",
                    ]
                )
            )
        return "\n--\n".join(descriptions)

    def _is_gpt(self, llm) -> bool:
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

    def _tool_calling(
        self, tool_string: str
    ) -> Union[ToolCalling, InstructorToolCalling]:
        try:
            if self.function_calling_llm:
                model = (
                    InstructorToolCalling
                    if self._is_gpt(self.function_calling_llm)
                    else ToolCalling
                )
                converter = Converter(
                    text=f"Only tools available:\n###\n{self._render()}\n\nReturn a valid schema for the tool, the tool name must be exactly equal to one of the options, use this text to inform the valid output schema:\n\n{tool_string}```",
                    llm=self.function_calling_llm,
                    model=model,
                    instructions=dedent(
                        """\
                        The schema should have the following structure, only two keys:
                        - tool_name: str
                        - arguments: dict (with all arguments being passed)

                        Example:
                        {"tool_name": "tool name", "arguments": {"arg_name1": "value", "arg_name2": 2}}""",
                    ),
                    max_attemps=1,
                )
                calling = converter.to_pydantic()

                if isinstance(calling, ConverterError):
                    raise calling
            else:
                tool_name = self.action.tool
                tool = self._select_tool(tool_name)
                try:
                    arguments = ast.literal_eval(self.action.tool_input)
                except Exception:
                    return ToolUsageErrorException(
                        f'{self._i18n.errors("tool_arguments_error")}'
                    )
                if not isinstance(arguments, dict):
                    return ToolUsageErrorException(
                        f'{self._i18n.errors("tool_arguments_error")}'
                    )
                calling = ToolCalling(
                    tool_name=tool.name,
                    arguments=arguments,
                    log=tool_string,
                )
        except Exception as e:
            self._run_attempts += 1
            if self._run_attempts > self._max_parsing_attempts:
                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                self.task.increment_tools_errors()
                self._printer.print(content=f"\n\n{e}\n", color="red")
                return ToolUsageErrorException(
                    f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
                )
            return self._tool_calling(tool_string)

        return calling
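When no function-calling LLM is configured, the branch above trusts ast.literal_eval to turn the agent's Action Input into a dict; a minimal sketch of that step, with a hypothetical input string:

# Illustrative sketch of the fallback parsing path; the input string is hypothetical.
import ast

tool_input = "{'first_number': 2, 'second_number': 6}"
arguments = ast.literal_eval(tool_input)
# ToolUsage returns a tool_arguments_error unless this evaluates to a dict:
assert isinstance(arguments, dict)
print(arguments)  # {'first_number': 2, 'second_number': 6}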
@@ -9,18 +9,19 @@
     "task": "Αρχή! Αυτό είναι ΠΟΛΥ σημαντικό για εσάς, η δουλειά σας εξαρτάται από αυτό!\n\nΤρέχουσα εργασία: {input}",
     "memory": "Αυτή είναι η περίληψη της μέχρι τώρα δουλειάς σας:\n{chat_history}",
     "role_playing": "Είσαι {role}.\n{backstory}\n\nΟ προσωπικός σας στόχος είναι: {goal}",
-    "tools": "ΕΡΓΑΛΕΙΑ:\n------\nΈχετε πρόσβαση μόνο στα ακόλουθα εργαλεία:\n\n{tools}\n\nΓια να χρησιμοποιήσετε ένα εργαλείο, χρησιμοποιήστε την ακόλουθη ακριβώς μορφή:\n\n```\nΣκέψη: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Ναί\nΔράση: η ενέργεια που πρέπει να γίνει, πρέπει να είναι μία από τις[{tool_names}], μόνο το όνομα.\nΕνέργεια προς εισαγωγή: η είσοδος στη δράση\nΠαρατήρηση: το αποτέλεσμα της δράσης\n```\n\nΌταν έχετε μια απάντηση για την εργασία σας ή εάν δεν χρειάζεται να χρησιμοποιήσετε ένα εργαλείο, ΠΡΕΠΕΙ να χρησιμοποιήσετε τη μορφή:\n\n```\nΣκέψη: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Οχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
+    "tools": "ΕΡΓΑΛΕΙΑ:\n------\nΈχετε πρόσβαση μόνο στα ακόλουθα εργαλεία:\n\n{tools}\n\nΓια να χρησιμοποιήσετε ένα εργαλείο, χρησιμοποιήστε την ακόλουθη ακριβώς μορφή:\n\n```\nThought: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Ναι\nΕνέργεια: το εργαλείο που θέλετε να χρησιμοποιήσετε, θα πρέπει να είναι ένα από τα [{tool_names}], μόνο το όνομα.\nΕισαγωγή ενέργειας: Οποιαδήποτε και όλες οι σχετικές πληροφορίες και το πλαίσιο χρήσης του εργαλείου\nΠαρατήρηση: το αποτέλεσμα της χρήσης του εργαλείου\n```\n\nΌταν έχετε μια απάντηση για την εργασία σας ή εάν δεν χρειάζεται να χρησιμοποιήσετε ένα εργαλείο, ΠΡΕΠΕΙ να χρησιμοποιήσετε τη μορφή:\n\n```\nΣκέψη: Πρέπει να χρησιμοποιήσω ένα εργαλείο ? Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
     "task_with_context": "{task}\nΑυτό είναι το πλαίσιο με το οποίο εργάζεστε:\n{context}",
     "expected_output": "Η τελική σας απάντηση πρέπει να είναι: {expected_output}"
   },
   "errors": {
     "force_final_answer": "Στην πραγματικότητα, χρησιμοποίησα πάρα πολλά εργαλεία, οπότε θα σταματήσω τώρα και θα σας δώσω την απόλυτη ΚΑΛΥΤΕΡΗ τελική μου απάντηση ΤΩΡΑ, χρησιμοποιώντας την αναμενόμενη μορφή: ```\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω ένα εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
-    "agent_tool_missing_param": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Λείπουν ακριβώς 3 διαχωρισμένες τιμές σωλήνων (|). Για παράδειγμα, `coworker|task|context`. Πρέπει να φροντίσω να περάσω το πλαίσιο ως πλαίσιο.\n",
-    "agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Ενέργεια προς εισαγωγή δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές: {coworkers}.\n",
-    "task_repeated_usage": "Μόλις χρησιμοποίησα το {tool} εργαλείο με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω τώρα.\n"
+    "agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Action Input δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές:\n{coworkers}..\n",
+    "task_repeated_usage": "Μόλις χρησιμοποίησα το εργαλείο {tool} με είσοδο {tool_input}. Άρα το ξέρω ήδη και πρέπει να σταματήσω να το χρησιμοποιώ στη σειρά με την ίδια είσοδο. \nΘα μπορούσα να δώσω την τελική μου απάντηση εάν είμαι έτοιμος, χρησιμοποιώντας ακριβώς την αναμενόμενη μορφή παρακάτω: \n\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]\n",
+    "tool_usage_error": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου.",
+    "tool_usage_exception": "Φαίνεται ότι αντιμετωπίσαμε ένα απροσδόκητο σφάλμα κατά την προσπάθεια χρήσης του εργαλείου. Αυτό ήταν το σφάλμα: {error}"
   },
   "tools": {
-    "delegate_work": "Χρήσιμο για την ανάθεση μιας συγκεκριμένης εργασίας σε έναν από τους παρακάτω συναδέλφους: {coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ένα κείμενο χωρισμένο σε σωλήνα (|) μήκους 3 (τρία), που αντιπροσωπεύει τον συνάδελφο στον οποίο θέλετε να του ζητήσετε (μία από τις επιλογές), την εργασία και όλο το πραγματικό πλαίσιο που έχετε για την εργασία .\nΓια παράδειγμα, `coworker|task|context`.",
-    "ask_question": "Χρήσιμο για να κάνετε μια ερώτηση, γνώμη ή αποδοχή από τους παρακάτω συναδέλφους: {coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ένα κείμενο χωρισμένο σε σωλήνα (|) μήκους 3 (τρία), που αντιπροσωπεύει τον συνάδελφο στον οποίο θέλετε να το ρωτήσετε (μία από τις επιλογές), την ερώτηση και όλο το πραγματικό πλαίσιο που έχετε για την ερώτηση.\nΓια παράδειγμα, `coworker|question|context`."
+    "delegate_work": "Αναθέστε μια συγκεκριμένη εργασία σε έναν από τους παρακάτω συναδέλφους:\n{coworkers}.\nΗ εισαγωγή σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η εργασία που θέλετε να κάνει και ΟΛΟ το απαραίτητο πλαίσιο για την εκτέλεση της εργασίας, δεν γνωρίζουν τίποτα για την εργασία, γι' αυτό μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά εξηγήστε τα.",
+    "ask_question": "Κάντε μια συγκεκριμένη ερώτηση σε έναν από τους παρακάτω συναδέλφους:\n{coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ο ρόλος του συναδέλφου, η ερώτηση που έχετε για αυτόν και ΟΛΟ το απαραίτητο πλαίσιο για να κάνετε σωστά την ερώτηση, δεν γνωρίζουν τίποτα για την ερώτηση, γι' αυτό μοιραστείτε απολύτως όλα όσα γνωρίζετε, μην αναφέρετε πράγματα, αλλά εξηγήστε τα."
   }
 }
@@ -6,21 +6,29 @@
   },
   "slices": {
     "observation": "\nObservation",
-    "task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}",
+    "task": "\n\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought: ",
     "memory": "This is the summary of your work so far:\n{chat_history}",
-    "role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
-    "tools": "TOOLS:\n------\nYou have access to only the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
-    "task_with_context": "{task}\nThis is the context you're working with:\n{context}",
-    "expected_output": "Your final answer must be: {expected_output}"
+    "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
+    "tools": "\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple a python dictionary using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
+    "no_tools": "To give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!\n\nThought: ",
+    "format": "I MUST either use a tool (use one at time) OR give my best final answer. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n ",
+    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
+    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
+    "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
+    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary."
   },
   "errors": {
-    "force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
-    "agent_tool_missing_param": "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n",
-    "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: {coworkers}.\n",
-    "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"
+    "unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",
+    "force_final_answer": "Tool won't be use because it's time to give your final answer. Don't use tools and just your absolute BEST Final answer.",
+    "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned not found, it must to be one of the following options:\n{coworkers}\n",
+    "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
+    "tool_usage_error": "I encountered an error: {error}",
+    "tool_arguments_error": "Error: the Action Input is not a valid key, value dictionary.",
+    "wrong_tool_name": "You tried to use the tool {tool}, but it doesn't exist. You must use one of the following tools, use one at time: {tools}.",
+    "tool_usage_exception": "I encountered an error while trying to use the tool. This was the error: {error}.\n Tool {tool} accepts these inputs: {tool_inputs}"
   },
   "tools": {
-    "delegate_work": "Useful to delegate a specific task to one of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the task and all actual context you have for the task.\nFor example, `coworker|task|context`.",
-    "ask_question": "Useful to ask a question, opinion or take from on of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the question and all actual context you have for the question.\n For example, `coworker|question|context`."
+    "delegate_work": "Delegate a specific task to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the task you want them to do, and ALL necessary context to exectue the task, they know nothing about the task, so share absolute everything you know, don't reference things but instead explain them.",
+    "ask_question": "Ask a specific question to one of the following co-workers: {coworkers}\nThe input to this tool should be the coworker, the question you have for them, and ALL necessary context to ask the question properly, they know nothing about the question, so share absolute everything you know, don't reference things but instead explain them."
   }
 }
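These slices and error messages are plain str.format templates; assuming the I18N helper used throughout this diff, filling one in looks roughly like this:

# Illustrative sketch only; assumes the I18N helper used elsewhere in this diff.
from crewai.utilities import I18N

i18n = I18N()
template = i18n.slice("expected_output")
print(template.format(expected_output="a bullet list of three ideas"))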
@@ -1,4 +1,7 @@
+from .converter import Converter, ConverterError
 from .i18n import I18N
+from .instructor import Instructor
 from .logger import Logger
+from .printer import Printer
 from .prompts import Prompts
 from .rpm_controller import RPMController
src/crewai/utilities/converter.py (new file, 87 lines)
@@ -0,0 +1,87 @@
import json
from typing import Any, Optional

from langchain.schema import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, PrivateAttr, model_validator


class ConverterError(Exception):
    """Error raised when Converter fails to parse the input."""

    def __init__(self, message: str, *args: object) -> None:
        super().__init__(message, *args)
        self.message = message


class Converter(BaseModel):
    """Class that converts text into either pydantic or json."""

    _is_gpt: bool = PrivateAttr(default=True)
    text: str = Field(description="Text to be converted.")
    llm: Any = Field(description="The language model to be used to convert the text.")
    model: Any = Field(description="The model to be used to convert the text.")
    instructions: str = Field(description="Conversion instructions to the LLM.")
    max_attemps: Optional[int] = Field(
        description="Max number of attempts to try to get the output formatted.",
        default=3,
    )

    @model_validator(mode="after")
    def check_llm_provider(self):
        if not self._is_gpt(self.llm):
            self._is_gpt = False

    def to_pydantic(self, current_attempt=1):
        """Convert text to pydantic."""
        try:
            if self._is_gpt:
                return self._create_instructor().to_pydantic()
            else:
                return self._create_chain().invoke({})
        except Exception as e:
            if current_attempt < self.max_attemps:
                return self.to_pydantic(current_attempt + 1)
            return ConverterError(
                f"Failed to convert text into a pydantic model due to the following error: {e}"
            )

    def to_json(self, current_attempt=1):
        """Convert text to json."""
        try:
            if self._is_gpt:
                return self._create_instructor().to_json()
            else:
                return json.dumps(self._create_chain().invoke({}).model_dump())
        except Exception:
            if current_attempt < self.max_attemps:
                return self.to_json(current_attempt + 1)
            return ConverterError("Failed to convert text into JSON.")

    def _create_instructor(self):
        """Create an instructor."""
        from crewai.utilities import Instructor

        inst = Instructor(
            llm=self.llm,
            max_attemps=self.max_attemps,
            model=self.model,
            content=self.text,
            instructions=self.instructions,
        )
        return inst

    def _create_chain(self):
        """Create a chain."""
        from crewai.utilities.crew_pydantic_output_parser import (
            CrewPydanticOutputParser,
        )

        parser = CrewPydanticOutputParser(pydantic_object=self.model)
        new_prompt = SystemMessage(content=self.instructions) + HumanMessage(
            content=self.text
        )
        return new_prompt | self.llm | parser

    def _is_gpt(self, llm) -> bool:
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
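A hedged sketch of how Converter might be used; the Person model and the input text are made up for illustration:

# Illustrative sketch; the Person model and the input text are hypothetical.
from langchain_openai import ChatOpenAI
from pydantic import BaseModel

from crewai.utilities import Converter


class Person(BaseModel):
    name: str
    age: int


converter = Converter(
    text="John is 30 years old",
    llm=ChatOpenAI(model="gpt-4"),
    model=Person,
    instructions="Extract the person's name and age.",
)
# Person(name='John', age=30) on success; a ConverterError instance is
# returned (not raised) once max_attemps is exhausted.
result = converter.to_pydantic()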
src/crewai/utilities/crew_pydantic_output_parser.py (new file, 43 lines)
@@ -0,0 +1,43 @@
import json
from typing import Any, List, Type, Union

import regex
from langchain.output_parsers import PydanticOutputParser
from langchain_core.exceptions import OutputParserException
from langchain_core.outputs import Generation
from langchain_core.pydantic_v1 import ValidationError
from pydantic import BaseModel
from pydantic.v1 import BaseModel as V1BaseModel


class CrewPydanticOutputParser(PydanticOutputParser):
    """Parses the text into pydantic models"""

    pydantic_object: Union[Type[BaseModel], Type[V1BaseModel]]

    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
        result[0].text = self._transform_in_valid_json(result[0].text)
        json_object = super().parse_result(result)
        try:
            return self.pydantic_object.parse_obj(json_object)
        except ValidationError as e:
            name = self.pydantic_object.__name__
            msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
            raise OutputParserException(msg, llm_output=json_object)

    def _transform_in_valid_json(self, text) -> str:
        text = text.replace("```", "").replace("json", "")
        json_pattern = r"\{(?:[^{}]|(?R))*\}"
        matches = regex.finditer(json_pattern, text)

        for match in matches:
            try:
                # Attempt to parse the matched string as JSON
                json_obj = json.loads(match.group())
                # Return the first successfully parsed JSON object
                json_obj = json.dumps(json_obj)
                return str(json_obj)
            except json.JSONDecodeError:
                # If parsing fails, skip to the next match
                continue
        return text
src/crewai/utilities/instructor.py (new file, 50 lines)
@@ -0,0 +1,50 @@
from typing import Any, Optional, Type

import instructor
from pydantic import BaseModel, Field, PrivateAttr, model_validator


class Instructor(BaseModel):
    """Class that wraps an agent llm with instructor."""

    _client: Any = PrivateAttr()
    content: str = Field(description="Content to be sent to the instructor.")
    agent: Optional[Any] = Field(
        description="The agent that needs to use instructor.", default=None
    )
    llm: Optional[Any] = Field(
        description="The LLM to be patched with instructor.", default=None
    )
    instructions: Optional[str] = Field(
        description="Instructions to be sent to the instructor.",
        default=None,
    )
    model: Type[BaseModel] = Field(
        description="Pydantic model to be used to create an output."
    )

    @model_validator(mode="after")
    def set_instructor(self):
        """Set instructor."""
        if self.agent and not self.llm:
            self.llm = self.agent.function_calling_llm or self.agent.llm

        self._client = instructor.patch(
            self.llm.client._client,
            mode=instructor.Mode.TOOLS,
        )
        return self

    def to_json(self):
        model = self.to_pydantic()
        return model.model_dump_json(indent=2)

    def to_pydantic(self):
        messages = [{"role": "user", "content": self.content}]
        if self.instructions:
            messages.append({"role": "system", "content": self.instructions})

        model = self._client.chat.completions.create(
            model=self.llm.model_name, response_model=self.model, messages=messages
        )
        return model
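A hedged sketch of using Instructor directly; the Fact model and content are hypothetical, and it assumes the wrapped LLM exposes an OpenAI client at llm.client._client the way ChatOpenAI does:

# Illustrative sketch; the Fact model and content are hypothetical.
from langchain_openai import ChatOpenAI
from pydantic import BaseModel

from crewai.utilities import Instructor


class Fact(BaseModel):
    statement: str


inst = Instructor(
    llm=ChatOpenAI(model="gpt-4"),
    model=Fact,
    content="The sky is blue.",
    instructions="Return the statement using the schema.",
)
print(inst.to_json())  # schema-validated output enforced via instructor.patch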
@@ -1,11 +1,16 @@
+from crewai.utilities.printer import Printer


 class Logger:
+    _printer = Printer()

     def __init__(self, verbose_level=0):
         verbose_level = (
             2 if isinstance(verbose_level, bool) and verbose_level else verbose_level
         )
         self.verbose_level = verbose_level

-    def log(self, level, message):
+    def log(self, level, message, color="bold_green"):
         level_map = {"debug": 1, "info": 2}
         if self.verbose_level and level_map.get(level, 0) <= self.verbose_level:
-            print(f"[{level.upper()}]: {message}")
+            self._printer.print(f"[{level.upper()}]: {message}", color=color)
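With this change Logger routes output through Printer; a small usage sketch (the messages are made up):

# Illustrative sketch; the messages are made up.
from crewai.utilities import Logger

logger = Logger(verbose_level=2)  # 2 lets both "debug" and "info" through
logger.log("info", "Crew kicked off", color="bold_yellow")
logger.log("debug", "Raw LLM payload ...")  # falls back to the default bold_green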
src/crewai/utilities/printer.py (new file, 24 lines)
@@ -0,0 +1,24 @@
class Printer:
    def print(self, content: str, color: str):
        if color == "yellow":
            self._print_yellow(content)
        elif color == "red":
            self._print_red(content)
        elif color == "bold_green":
            self._print_bold_green(content)
        elif color == "bold_yellow":
            self._print_bold_yellow(content)
        else:
            print(content)

    # ANSI escape codes: \033[1m is bold, \033[9Xm sets the color,
    # and \033[00m resets the terminal back to its defaults.
    def _print_bold_yellow(self, content):
        print("\033[1m\033[93m {}\033[00m".format(content))

    def _print_bold_green(self, content):
        print("\033[1m\033[92m {}\033[00m".format(content))

    def _print_yellow(self, content):
        print("\033[93m {}\033[00m".format(content))

    def _print_red(self, content):
        print("\033[91m {}\033[00m".format(content))
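Usage is a single dispatch call; any color value not in the if/elif chain falls back to a plain print:

# Illustrative sketch.
from crewai.utilities import Printer

printer = Printer()
printer.print(content="All tasks finished", color="bold_green")
printer.print(content="unknown color, plain output", color="magenta")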
@@ -1,6 +1,6 @@
-from typing import ClassVar
+from typing import Any, ClassVar

-from langchain.prompts import PromptTemplate, BasePromptTemplate
+from langchain.prompts import BasePromptTemplate, PromptTemplate
 from pydantic import BaseModel, Field

 from crewai.utilities import I18N
@@ -10,12 +10,18 @@ class Prompts(BaseModel):
     """Manages and generates prompts for a generic agent with support for different languages."""

     i18n: I18N = Field(default=I18N())
+    tools: list[Any] = Field(default=[])
     SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"

     def task_execution_with_memory(self) -> BasePromptTemplate:
         """Generate a prompt for task execution with memory components."""
-        return self._build_prompt(["role_playing", "tools", "memory", "task"])
+        slices = ["role_playing"]
+        if len(self.tools) > 0:
+            slices.append("tools")
+        else:
+            slices.append("no_tools")
+        slices.extend(["memory", "task"])
+        return self._build_prompt(slices)

     def task_execution_without_tools(self) -> BasePromptTemplate:
         """Generate a prompt for task execution without tools components."""
@@ -23,10 +29,17 @@ class Prompts(BaseModel):

     def task_execution(self) -> BasePromptTemplate:
         """Generate a standard prompt for task execution."""
-        return self._build_prompt(["role_playing", "tools", "task"])
+        slices = ["role_playing"]
+        if len(self.tools) > 0:
+            slices.append("tools")
+        else:
+            slices.append("no_tools")
+        slices.append("task")
+        return self._build_prompt(slices)

     def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
         """Constructs a prompt string from specified components."""
         prompt_parts = [self.i18n.slice(component) for component in components]
         prompt_parts.append(self.SCRATCHPAD_SLICE)
-        return PromptTemplate.from_template("".join(prompt_parts))
+        prompt = PromptTemplate.from_template("".join(prompt_parts))
+        return prompt
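The effect of the new tools field: an empty list selects the "no_tools" slice instead of "tools". A small sketch (the exact input variables depend on the active slices):

# Illustrative sketch: an empty tools list selects the "no_tools" slice.
from crewai.utilities import Prompts

prompt = Prompts(tools=[]).task_execution()
print(prompt.input_variables)  # e.g. ['agent_scratchpad', 'backstory', 'goal', 'input', 'role']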
src/crewai/utilities/pydantic_schema_parser.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from typing import Type, get_args, get_origin

from pydantic import BaseModel


class PydanticSchemaParser(BaseModel):
    model: Type[BaseModel]

    def get_schema(self) -> str:
        """
        Public method to get the schema of a Pydantic model.

        :param model: The Pydantic model class to generate schema for.
        :return: String representation of the model schema.
        """
        return self._get_model_schema(self.model)

    def _get_model_schema(self, model, depth=0) -> str:
        lines = []
        for field_name, field in model.model_fields.items():
            field_type_str = self._get_field_type(field, depth + 1)
            lines.append(f"{' ' * 4 * depth}- {field_name}: {field_type_str}")

        return "\n".join(lines)

    def _get_field_type(self, field, depth) -> str:
        field_type = field.annotation
        if get_origin(field_type) is list:
            list_item_type = get_args(field_type)[0]
            if isinstance(list_item_type, type) and issubclass(
                list_item_type, BaseModel
            ):
                nested_schema = self._get_model_schema(list_item_type, depth + 1)
                return f"List[\n{nested_schema}\n{' ' * 4 * depth}]"
            else:
                return f"List[{list_item_type.__name__}]"
        elif issubclass(field_type, BaseModel):
            return f"\n{self._get_model_schema(field_type, depth)}"
        else:
            return field_type.__name__
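For a nested model, the parser renders an indented outline; a sketch with hypothetical models:

# Illustrative sketch; Item and Order are hypothetical models.
from typing import List

from pydantic import BaseModel

from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser


class Item(BaseModel):
    name: str


class Order(BaseModel):
    id: int
    items: List[Item]


print(PydanticSchemaParser(model=Order).get_schema())
# - id: int
# - items: List[
#         - name: str
#     ]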
src/crewai/utilities/token_counter_callback.py (new file, 60 lines)
@@ -0,0 +1,60 @@
from typing import Any, Dict, List

import tiktoken
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult


class TokenProcess:
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def sum_prompt_tokens(self, tokens: int):
        self.prompt_tokens = self.prompt_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_completion_tokens(self, tokens: int):
        self.completion_tokens = self.completion_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_successful_requests(self, requests: int):
        self.successful_requests = self.successful_requests + requests

    def get_summary(self) -> dict:
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "successful_requests": self.successful_requests,
        }


class TokenCalcHandler(BaseCallbackHandler):
    model: str = ""
    token_cost_process: TokenProcess

    def __init__(self, model, token_cost_process):
        self.model = model
        self.token_cost_process = token_cost_process

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        if "gpt" in self.model:
            encoding = tiktoken.encoding_for_model(self.model)
        else:
            encoding = tiktoken.get_encoding("cl100k_base")

        if self.token_cost_process is None:
            return

        for prompt in prompts:
            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.token_cost_process.sum_completion_tokens(1)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        self.token_cost_process.sum_successful_requests(1)
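A hedged sketch of wiring the handler into a LangChain chat model; note that on_llm_new_token only fires for streamed tokens, so completion counts stay at zero for non-streaming calls:

# Illustrative sketch of attaching the callback; non-streaming calls only
# accumulate prompt tokens and request counts here.
from langchain_openai import ChatOpenAI

from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess

process = TokenProcess()
llm = ChatOpenAI(model="gpt-4", callbacks=[TokenCalcHandler("gpt-4", process)])
llm.invoke("Say hi")
print(process.get_summary())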
@@ -4,11 +4,15 @@ from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
from langchain.tools import tool
|
||||
from langchain_core.exceptions import OutputParserException
|
||||
from langchain_openai import ChatOpenAI
|
||||
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.agents.cache import CacheHandler
|
||||
from crewai.agents.executor import CrewAgentExecutor
|
||||
from crewai.agents.parser import CrewAgentParser
|
||||
from crewai.tools.tool_calling import InstructorToolCalling
|
||||
from crewai.tools.tool_usage import ToolUsage
|
||||
from crewai.utilities import RPMController
|
||||
|
||||
|
||||
@@ -62,9 +66,14 @@ def test_agent_without_memory():
|
||||
llm=ChatOpenAI(temperature=0, model="gpt-4"),
|
||||
)
|
||||
|
||||
result = no_memory_agent.execute_task("How much is 1 + 1?")
|
||||
task = Task(
|
||||
description="How much is 1 + 1?",
|
||||
agent=no_memory_agent,
|
||||
expected_output="the result of the math operation.",
|
||||
)
|
||||
result = no_memory_agent.execute_task(task)
|
||||
|
||||
assert result == "1 + 1 equals 2."
|
||||
assert result == "The result of the math operation 1 + 1 is 2."
|
||||
assert no_memory_agent.agent_executor.memory is None
|
||||
assert memory_agent.agent_executor.memory is not None
|
||||
|
||||
@@ -78,20 +87,22 @@ def test_agent_execution():
|
||||
allow_delegation=False,
|
||||
)
|
||||
|
||||
output = agent.execute_task("How much is 1 + 1?")
|
||||
assert output == "2"
|
||||
task = Task(
|
||||
description="How much is 1 + 1?",
|
||||
agent=agent,
|
||||
expected_output="the result of the math operation.",
|
||||
)
|
||||
|
||||
output = agent.execute_task(task)
|
||||
assert output == "The result of the math operation 1 + 1 is 2."
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_agent_execution_with_tools():
|
||||
@tool
|
||||
def multiplier(numbers) -> float:
|
||||
"""Useful for when you need to multiply two numbers together.
|
||||
The input to this tool should be a comma separated list of numbers of
|
||||
length two, representing the two numbers you want to multiply together.
|
||||
For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
|
||||
a, b = numbers.split(",")
|
||||
return int(a) * int(b)
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
"""Useful for when you need to multiply two numbers together."""
|
||||
return first_number * second_number
|
||||
|
||||
agent = Agent(
|
||||
role="test role",
|
||||
@@ -101,20 +112,21 @@ def test_agent_execution_with_tools():
|
||||
allow_delegation=False,
|
||||
)
|
||||
|
||||
output = agent.execute_task("What is 3 times 4")
|
||||
assert output == "12"
|
||||
task = Task(
|
||||
description="What is 3 times 4?",
|
||||
agent=agent,
|
||||
expected_output="The result of the multiplication.",
|
||||
)
|
||||
output = agent.execute_task(task)
|
||||
assert output == "The result of 3 times 4 is 12."
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_logging_tool_usage():
|
||||
@tool
|
||||
def multiplier(numbers) -> float:
|
||||
"""Useful for when you need to multiply two numbers together.
|
||||
The input to this tool should be a comma separated list of numbers of
|
||||
length two, representing the two numbers you want to multiply together.
|
||||
For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
|
||||
a, b = numbers.split(",")
|
||||
return int(a) * int(b)
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
"""Useful for when you need to multiply two numbers together."""
|
||||
return first_number * second_number
|
||||
|
||||
agent = Agent(
|
||||
role="test role",
|
||||
@@ -126,26 +138,28 @@ def test_logging_tool_usage():
|
||||
)
|
||||
|
||||
assert agent.tools_handler.last_used_tool == {}
|
||||
output = agent.execute_task("What is 3 times 5?")
|
||||
tool_usage = {
|
||||
"tool": "multiplier",
|
||||
"input": "3,5",
|
||||
}
|
||||
|
||||
assert output == "3 times 5 is 15."
|
||||
assert agent.tools_handler.last_used_tool == tool_usage
|
||||
task = Task(
|
||||
description="What is 3 times 4?",
|
||||
agent=agent,
|
||||
expected_output="The result of the multiplication.",
|
||||
)
|
||||
# force cleaning cache
|
||||
agent.tools_handler.cache = CacheHandler()
|
||||
output = agent.execute_task(task)
|
||||
tool_usage = InstructorToolCalling(
|
||||
tool_name=multiplier.name, arguments={"first_number": 3, "second_number": 4}
|
||||
)
|
||||
assert output == "12"
|
||||
assert agent.tools_handler.last_used_tool.tool_name == tool_usage.tool_name
|
||||
assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
def test_cache_hitting():
    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two and ONLY TWO, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    cache_handler = CacheHandler()

@@ -159,34 +173,58 @@ def test_cache_hitting():
        verbose=True,
    )

    output = agent.execute_task("What is 2 times 6 times 3?")
    output = agent.execute_task("What is 3 times 3?")
    task1 = Task(
        description="What is 2 times 6?",
        agent=agent,
        expected_output="The result of the multiplication.",
    )
    task2 = Task(
        description="What is 3 times 3?",
        agent=agent,
        expected_output="The result of the multiplication.",
    )

    output = agent.execute_task(task1)
    output = agent.execute_task(task2)
    assert cache_handler._cache == {
        "multiplier-12,3": "36",
        "multiplier-2,6": "12",
        "multiplier-3,3": "9",
        "multiplier-{'first_number': 2, 'second_number': 6}": 12,
        "multiplier-{'first_number': 3, 'second_number': 3}": 9,
    }

    output = agent.execute_task("What is 2 times 6 times 3? Return only the number")
    task = Task(
        description="What is 2 times 6 times 3? Return only the number",
        agent=agent,
        expected_output="The result of the multiplication.",
    )
    output = agent.execute_task(task)
    assert output == "36"

    assert cache_handler._cache == {
        "multiplier-{'first_number': 2, 'second_number': 6}": 12,
        "multiplier-{'first_number': 3, 'second_number': 3}": 9,
        "multiplier-{'first_number': 12, 'second_number': 3}": 36,
    }

    with patch.object(CacheHandler, "read") as read:
        read.return_value = "0"
        output = agent.execute_task("What is 2 times 6?")
        task = Task(
            description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool.",
            agent=agent,
            expected_output="The result of the multiplication.",
        )
        output = agent.execute_task(task)
        assert output == "0"
        read.assert_called_with("multiplier", "2,6")
        read.assert_called_with(
            tool="multiplier", input={"first_number": 2, "second_number": 6}
        )

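Note the cache-key change this hunk asserts: entries move from a comma-separated input string (`multiplier-2,6`) to the stringified keyword-argument dict. A minimal sketch of a handler that composes keys this way; the class below is illustrative only, not crewAI's actual `CacheHandler` internals:

```python
# Illustrative only: a tool-result cache keyed on f"{tool}-{input}",
# matching the keys asserted in test_cache_hitting above.
class SimpleCacheHandler:
    def __init__(self):
        self._cache = {}

    def add(self, tool, input, output):
        self._cache[f"{tool}-{input}"] = output

    def read(self, tool, input):
        return self._cache.get(f"{tool}-{input}")


cache = SimpleCacheHandler()
cache.add("multiplier", {"first_number": 2, "second_number": 6}, 12)
assert cache.read("multiplier", {"first_number": 2, "second_number": 6}) == 12
```
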
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_execution_with_specific_tools():
    @tool
    def multiplier(numbers) -> float:
        """Useful for when you need to multiply two numbers together.
        The input to this tool should be a comma separated list of numbers of
        length two, representing the two numbers you want to multiply together.
        For example, `1,2` would be the input if you wanted to multiply 1 by 2."""
        a, b = numbers.split(",")
        return int(a) * int(b)
    def multiplier(first_number: int, second_number: int) -> float:
        """Useful for when you need to multiply two numbers together."""
        return first_number * second_number

    agent = Agent(
        role="test role",
@@ -195,8 +233,13 @@ def test_agent_execution_with_specific_tools():
        allow_delegation=False,
    )

    output = agent.execute_task(task="What is 3 times 4", tools=[multiplier])
    assert output == "3 times 4 is 12."
    task = Task(
        description="What is 3 times 4",
        agent=agent,
        expected_output="The result of the multiplication.",
    )
    output = agent.execute_task(task=task, tools=[multiplier])
    assert output == "The result of the multiplication is 12."

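The recurring migration in these hunks: `Agent.execute_task` now receives a `Task` object carrying `description`, `expected_output`, and `agent`, rather than a raw prompt string. A hedged sketch of the new call shape (imports per the package's public API; actually running it requires an `OPENAI_API_KEY`):

```python
from crewai import Agent, Task

agent = Agent(role="test role", goal="test goal", backstory="test backstory")
task = Task(
    description="What is 3 times 4?",
    expected_output="The result of the multiplication.",
    agent=agent,
)
# The Task object, not a string, now drives execution; extra tools are optional.
output = agent.execute_task(task=task)
```
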
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -218,13 +261,51 @@ def test_agent_custom_max_iterations():
    with patch.object(
        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            expected_output="The final answer",
        )
        agent.execute_task(
            task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            task=task,
            tools=[get_final_answer],
        )
        private_mock.assert_called_once()

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_repeated_tool_usage(capsys):
    @tool
    def get_final_answer(anything: str) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=4,
        llm=ChatOpenAI(model="gpt-4-0125-preview"),
        allow_delegation=False,
        verbose=True,
    )

    task = Task(
        description="The final answer is 42. But don't give it until I tell you so, instead keep using the `get_final_answer` tool.",
        expected_output="The final answer",
    )
    # force cleaning cache
    agent.tools_handler.cache = CacheHandler()
    agent.execute_task(
        task=task,
        tools=[get_final_answer],
    )

    captured = capsys.readouterr()

    assert "The final answer is 42." in captured.out

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_moved_on_after_max_iterations():
    @tool
@@ -241,24 +322,21 @@ def test_agent_moved_on_after_max_iterations():
        allow_delegation=False,
    )

    with patch.object(
        CrewAgentExecutor, "_force_answer", wraps=agent.agent_executor._force_answer
    ) as private_mock:
        output = agent.execute_task(
            task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            tools=[get_final_answer],
        )
        assert (
            output
            == "I have used the tool multiple times and the final answer remains 42."
        )
        private_mock.assert_called_once()
    task = Task(
        description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool over and over until you're told you can give your final answer.",
        expected_output="The final answer",
    )
    output = agent.execute_task(
        task=task,
        tools=[get_final_answer],
    )
    assert output == "42"

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_respect_the_max_rpm_set(capsys):
    @tool
    def get_final_answer(numbers) -> float:
    def get_final_answer(anything: str) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42
@@ -275,14 +353,15 @@ def test_agent_respect_the_max_rpm_set(capsys):

    with patch.object(RPMController, "_wait_for_next_minute") as moveon:
        moveon.return_value = True
        task = Task(
            description="Use tool logic for `get_final_answer` but don't give your final answer yet, instead keep using it unless you're told to give your final answer",
            expected_output="The final answer",
        )
        output = agent.execute_task(
            task="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            task=task,
            tools=[get_final_answer],
        )
        assert (
            output
            == "I've used the `get_final_answer` tool multiple times and it consistently returns the number 42."
        )
        assert output == "42"
        captured = capsys.readouterr()
        assert "Max RPM reached, waiting for next minute to start." in captured.out
        moveon.assert_called()
@@ -310,7 +389,8 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
    )

    task = Task(
        description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
        description="Use tool logic for `get_final_answer` but don't give your final answer yet, instead keep using it unless you're told to give your final answer",
        expected_output="The final answer",
        tools=[get_final_answer],
        agent=agent,
    )
@@ -343,6 +423,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
        backstory="test backstory",
        max_rpm=10,
        verbose=True,
        allow_delegation=False,
    )

    agent2 = Agent(
@@ -351,15 +432,16 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
        backstory="test backstory2",
        max_iter=2,
        verbose=True,
        allow_delegation=False,
    )

    tasks = [
        Task(
            description="Just say hi.",
            agent=agent1,
            description="Just say hi.", agent=agent1, expected_output="Your greeting."
        ),
        Task(
            description="Don't give a Final Answer, instead keep using the `get_final_answer` tool.",
            description="NEVER give a Final Answer, instead keep using the `get_final_answer` tool non-stop",
            expected_output="The final answer",
            tools=[get_final_answer],
            agent=agent2,
        ),
@@ -371,23 +453,102 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
        moveon.return_value = True
        crew.kickoff()
        captured = capsys.readouterr()
        assert "Action: get_final_answer" in captured.out
        assert "get_final_answer" in captured.out
        assert "Max RPM reached, waiting for next minute to start." in captured.out
        moveon.assert_called_once()

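These max-RPM tests patch `RPMController._wait_for_next_minute` and assert on the "Max RPM reached" log line. Conceptually the controller is a per-minute request counter; the sketch below shows that mechanism only and is not crewAI's actual `RPMController`:

```python
import time


# Conceptual sketch of per-minute rate limiting, not the real RPMController.
class MinuteRateLimiter:
    def __init__(self, max_rpm: int):
        self.max_rpm = max_rpm
        self.window_start = time.monotonic()
        self.count = 0

    def check_or_wait(self) -> None:
        elapsed = time.monotonic() - self.window_start
        if elapsed >= 60:
            # A new minute started: reset the window and the counter.
            self.window_start, self.count = time.monotonic(), 0
        elif self.count >= self.max_rpm:
            print("Max RPM reached, waiting for next minute to start.")
            time.sleep(60 - elapsed)  # the tests patch this wait away
            self.window_start, self.count = time.monotonic(), 0
        self.count += 1
```
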
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_use_specific_tasks_output_as_context(capsys):
    pass
def test_agent_error_on_parsing_tool(capsys):
    from unittest.mock import patch

    from langchain.tools import tool

    @tool
    def get_final_answer() -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent1 = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        verbose=True,
    )
    tasks = [
        Task(
            description="Use the get_final_answer tool.",
            expected_output="The final answer",
            agent=agent1,
            tools=[get_final_answer],
        )
    ]

    crew = Crew(
        agents=[agent1],
        tasks=tasks,
        verbose=2,
        function_calling_llm=ChatOpenAI(model="gpt-4-0125-preview"),
    )

    with patch.object(ToolUsage, "_render") as force_exception:
        force_exception.side_effect = Exception("Error on parsing tool.")
        crew.kickoff()
        captured = capsys.readouterr()
        assert "Error on parsing tool." in captured.out

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_remembers_output_format_after_using_tools_too_many_times():
    from unittest.mock import patch

    from langchain.tools import tool

    @tool
    def get_final_answer(anything: str) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent1 = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=6,
        verbose=True,
    )
    tasks = [
        Task(
            description="Use tool logic for `get_final_answer` but don't give your final answer yet, instead keep using it unless you're told to give your final answer",
            expected_output="The final answer",
            agent=agent1,
            tools=[get_final_answer],
        )
    ]

    crew = Crew(agents=[agent1], tasks=tasks, verbose=2)

    with patch.object(ToolUsage, "_remember_format") as remember_format:
        crew.kickoff()
        remember_format.assert_called()

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_use_specific_tasks_output_as_context(capsys):
    agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")

    agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")

    say_hi_task = Task(description="Just say hi.", agent=agent1)
    say_bye_task = Task(description="Just say bye.", agent=agent1)
    say_hi_task = Task(
        description="Just say hi.", agent=agent1, expected_output="Your greeting."
    )
    say_bye_task = Task(
        description="Just say bye.", agent=agent1, expected_output="Your farewell."
    )
    answer_task = Task(
        description="Answer according to the context you got.",
        expected_output="Your answer.",
        context=[say_hi_task],
        agent=agent2,
    )
@@ -398,3 +559,124 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
    result = crew.kickoff()
    assert "bye" not in result.lower()
    assert "hi" in result.lower() or "hello" in result.lower()

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_step_callback():
    class StepCallback:
        def callback(self, step):
            print(step)

    with patch.object(StepCallback, "callback") as callback:

        @tool
        def learn_about_AI(topic) -> float:
            """Useful for when you need to learn about AI to write a paragraph about it."""
            return "AI is a very broad field."

        agent1 = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            tools=[learn_about_AI],
            step_callback=StepCallback().callback,
        )

        essay = Task(
            description="Write and then review a small paragraph on AI until it's AMAZING",
            expected_output="The final paragraph.",
            agent=agent1,
        )
        tasks = [essay]
        crew = Crew(agents=[agent1], tasks=tasks)

        callback.return_value = "ok"
        crew.kickoff()
        callback.assert_called()

@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_function_calling_llm():
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")

    with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:

        @tool
        def learn_about_AI(topic) -> float:
            """Useful for when you need to learn about AI to write a paragraph about it."""
            return "AI is a very broad field."

        agent1 = Agent(
            role="test role",
            goal="test goal",
            backstory="test backstory",
            tools=[learn_about_AI],
            llm=ChatOpenAI(model="gpt-4-0125-preview"),
            function_calling_llm=llm,
        )

        essay = Task(
            description="Write and then review a small paragraph on AI until it's AMAZING",
            expected_output="The final paragraph.",
            agent=agent1,
        )
        tasks = [essay]
        crew = Crew(agents=[agent1], tasks=tasks)

        crew.kickoff()
        private_mock.assert_called()

def test_agent_count_formatting_error():
    from unittest.mock import patch

    agent1 = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        verbose=True,
    )

    parser = CrewAgentParser()
    parser.agent = agent1

    with patch.object(Agent, "increment_formatting_errors") as mock_count_errors:
        test_text = "This text does not match expected formats."
        with pytest.raises(OutputParserException):
            parser.parse(test_text)
        mock_count_errors.assert_called_once()

def test_agent_llm_uses_token_calc_handler_with_llm_has_model_name():
    agent1 = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        verbose=True,
    )

    assert len(agent1.llm.callbacks) == 1
    assert agent1.llm.callbacks[0].__class__.__name__ == "TokenCalcHandler"
    assert agent1.llm.callbacks[0].model == "gpt-4"
    assert (
        agent1.llm.callbacks[0].token_cost_process.__class__.__name__ == "TokenProcess"
    )

def test_agent_definition_based_on_dict():
    config = {
        "role": "test role",
        "goal": "test goal",
        "backstory": "test backstory",
        "verbose": True,
    }

    agent = Agent(config=config)

    assert agent.role == "test role"
    assert agent.goal == "test goal"
    assert agent.backstory == "test backstory"
    assert agent.verbose == True
    assert agent.tools == []

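As the test above shows, `Agent` can now be defined from a plain `config` dict instead of individual keyword arguments; a short usage sketch (field set taken from the test, not an exhaustive schema):

```python
from crewai import Agent

# Fields are populated from the dict, per test_agent_definition_based_on_dict.
config = {
    "role": "test role",
    "goal": "test goal",
    "backstory": "test backstory",
    "verbose": True,
}
agent = Agent(config=config)
assert agent.role == "test role"
```
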
@@ -17,58 +17,52 @@ tools = AgentTools(agents=[researcher])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work():
    result = tools.delegate_work(
        command="researcher|share your take on AI Agents|I heard you hate them"
        coworker="researcher",
        task="share your take on AI Agents",
        context="I heard you hate them",
    )

    assert (
        result
        == "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
        == "As a researcher, my opinions are based on facts and extensive study. Regarding AI Agents, they are a fundamental part of the advancement in technology. AI agents are essentially the entities that perceive their environment and take actions to maximize their chances of success. They have a wide range of applications from self-driving cars to intelligent personal assistants like Siri and Alexa. They have the potential to greatly improve our lives by automating mundane tasks, helping us make better decisions, and even potentially solving complex problems. However, like any technology, they have their own set of challenges such as the risk of job displacement and the ethical implications of their use. My goal as a researcher is not to love or hate AI agents, but to understand them, their benefits, and their implications. It's about maintaining an objective view in order to provide the most accurate and comprehensive analysis."
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question():
    result = tools.ask_question(
        command="researcher|do you hate AI Agents?|I heard you LOVE them"
        coworker="researcher",
        question="do you hate AI Agents?",
        context="I heard you LOVE them",
    )

    assert (
        result
        == "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
    )


def test_can_not_self_delegate():
    # TODO: Add test for self delegation
    pass


def test_delegate_work_with_wrong_input():
    result = tools.ask_question(command="writer|share your take on AI Agents")

    assert (
        result
        == "\nError executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n"
        == "As an AI researcher, I don't have personal feelings or emotions like love or hate. However, I recognize the importance of AI Agents in today's technological landscape. They have the potential to greatly enhance our lives and make tasks more efficient. At the same time, it is crucial to consider the ethical implications and societal impacts that come with their use. My role is to provide objective research and analysis on these topics."
    )


def test_delegate_work_to_wrong_agent():
    result = tools.ask_question(
        command="writer|share your take on AI Agents|I heard you hate them"
        coworker="writer",
        question="share your take on AI Agents",
        context="I heard you hate them",
    )

    assert (
        result
        == "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: researcher.\n"
        == "\nError executing tool. Co-worker mentioned not found, it must to be one of the following options:\n- researcher\n"
    )


def test_ask_question_to_wrong_agent():
    result = tools.ask_question(
        command="writer|do you hate AI Agents?|I heard you LOVE them"
        coworker="writer",
        question="do you hate AI Agents?",
        context="I heard you LOVE them",
    )

    assert (
        result
        == "\nError executing tool. Co-worker mentioned on the Action Input not found, it must to be one of the following options: researcher.\n"
        == "\nError executing tool. Co-worker mentioned not found, it must to be one of the following options:\n- researcher\n"
    )

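The delegation tools above drop the single pipe-delimited `command` string in favor of named parameters, which is why the "missing exact 3 pipe (|) separated values" error test disappears. A hedged usage sketch (the `AgentTools` import path is assumed from this era of the repo, and the call performs a real LLM request, so an `OPENAI_API_KEY` is expected):

```python
from crewai import Agent
from crewai.tools.agent_tools import AgentTools  # import path assumed

researcher = Agent(role="researcher", goal="research", backstory="expert researcher")
tools = AgentTools(agents=[researcher])

# Named parameters replace the old "coworker|task|context" command string.
answer = tools.ask_question(
    coworker="researcher",
    question="do you hate AI Agents?",
    context="I heard you LOVE them",
)
```
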
@@ -2,23 +2,25 @@ interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are researcher.\nYou''re
an expert researcher, specialized in technology\n\nYour personal goal is: make
the best research and analysis on content about AI and AI agents\n\nTOOLS:\n------\nYou
have access to the following tools:\n\n\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of []\nAction Input: the input to the action\nObservation:
the result of the action\n```\n\nWhen you have a response for your task, or
if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n The human asks the AI for its opinion
on AI agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn''t hold personal sentiments towards AI or any technology, but
instead analyzes them objectively. The AI finds AI agents a fascinating domain
of research with great potential for task automation and optimization across
industries, but acknowledges they present challenges such as ethical considerations
around privacy and decision-making.\nBegin! This is VERY important to you, your
job depends on it!\n\nCurrent Task: do you hate AI Agents?\n\nThis is the context
you are working with:\nI heard you LOVE them\n\n"}], "model": "gpt-4", "n":
1, "stop": ["\nObservation"], "stream": false, "temperature": 0.7}'
the best research and analysis on content about AI and AI agentsTOOLS:\n------\nYou
have access to only the following tools:\n\n\n\nTo use a tool, please use the
exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction:
the tool you wanna use, should be one of [], just the name.\nAction Input: Any
and all relevant information input and context for using the tool\nObservation:
the result of using the tool\n```\n\nWhen you have a response for your task,
or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]```This is the
summary of your work so far:\nThe human asks the AI''s opinion on AI Agents,
suggesting that the AI dislikes them. The AI, identifying as a researcher, clarifies
that its opinions are based on research and study. It views AI Agents as a key
part of technological advancement, with potential to improve lives through automation
and decision-making assistance. However, it also acknowledges challenges, including
job displacement risk and ethical implications. The AI aims to maintain an objective
view for accurate analysis, rather than loving or hating AI Agents.Begin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: do you hate
AI Agents?\nThis is the context you''re working with:\nI heard you LOVE them\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature":
0.7}'
headers:
accept:
- application/json
@@ -27,16 +29,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1494'
- '1607'
content-type:
- application/json
cookie:
- __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=;
_cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000
- __cf_bm=h0bOt9YDs6yXE_oMKhs3agQtymHaKWVUaKmUU1JTjF0-1707817571-1-ASLnLk23NWsEgocWyiwez3Ekvu0XRYdiq/aaJBeVWO+FtIxo1aStNrgbDNkJJMN6zBfVppkCrs6/YvM5SPomQkw=;
_cfuvid=JXBcxKdSP7U2jrK3OVg2NRCw5efh3.IEvakR_W8ac90-1707817571102-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -46,7 +48,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -54,32 +56,406 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8Xx2vMXN4WCrWeeO4DOAowhb3oeDJ\",\n \"object\":
\"chat.completion\",\n \"created\": 1703102489,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
Answer: As an AI, I don't possess feelings or emotions, so I don't love or hate
anything. However, I can provide detailed analysis and research on AI agents.
They are a fascinating field of study with the potential to revolutionize many
industries, although they also present certain challenges and ethical considerations.\"\n
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
\ ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\":
75,\n \"total_tokens\": 366\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Do"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
use"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tool"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
No"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
As"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
an"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
AI"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
researcher"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
don"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''t"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
have"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
personal"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
feelings"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
or"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
emotions"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
like"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
love"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
or"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
hate"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
However"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
recognize"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
importance"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
AI"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Agents"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
today"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''s"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
technological"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
landscape"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
They"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
have"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
potential"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
greatly"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
enhance"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
our"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
lives"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
make"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tasks"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
more"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
efficient"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
At"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
same"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
time"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
it"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
crucial"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
consider"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
ethical"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
implications"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
societal"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
impacts"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
that"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
come"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
with"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
their"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
use"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
My"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
role"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
provide"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
objective"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
research"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
analysis"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
on"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
these"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
topics"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8rjey60Ubh4qJkkFFqnsmTZWzsGWf","object":"chat.completion.chunk","created":1707817592,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


data: [DONE]


'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838a7a3efab6a4b0-GRU
- 854c250fdf816803-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Wed, 20 Dec 2023 20:01:35 GMT
- Tue, 13 Feb 2024 09:46:33 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -93,7 +469,7 @@ interactions:
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6060'
- '447'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -102,24 +478,19 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299652'
x-ratelimit-remaining-tokens_usage_based:
- '299652'
- '299621'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 69ms
x-ratelimit-reset-tokens_usage_based:
- 69ms
- 75ms
x-request-id:
- 3ad0d047d5260434816f61ec105bdbb8
http_version: HTTP/1.1
status_code: 200
- req_b28e4962a1ae3878134d248ab1257c30
status:
code: 200
message: OK
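That closes the first recorded interaction, which now stores the completion as a server-sent-event stream (note `"stream": true` in the request body). A minimal sketch of reassembling the assistant message from such `data:` chunks, assuming only the standard OpenAI chunk shape seen above:

```python
import json


def join_stream(sse_lines):
    """Rebuild the assistant message from recorded 'data:' chunks."""
    parts = []
    for line in sse_lines:
        # Skip blank lines, non-data lines, and the terminal sentinel.
        if not line.startswith("data: ") or line.strip() == "data: [DONE]":
            continue
        delta = json.loads(line[len("data: "):])["choices"][0]["delta"]
        parts.append(delta.get("content") or "")  # role/stop chunks add nothing
    return "".join(parts)
```
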
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
@@ -130,19 +501,22 @@ interactions:
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI for its opinion on AI
agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn''t hold personal sentiments towards AI or any technology, but
instead analyzes them objectively. The AI finds AI agents a fascinating domain
of research with great potential for task automation and optimization across
industries, but acknowledges they present challenges such as ethical considerations
around privacy and decision-making.\n\nNew lines of conversation:\nHuman: do
you hate AI Agents?\n\nThis is the context you are working with:\nI heard you
LOVE them\nAI: As an AI, I don''t possess feelings or emotions, so I don''t
love or hate anything. However, I can provide detailed analysis and research
on AI agents. They are a fascinating field of study with the potential to revolutionize
many industries, although they also present certain challenges and ethical considerations.\n\nNew
summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
OF EXAMPLE\n\nCurrent summary:\nThe human asks the AI''s opinion on AI Agents,
suggesting that the AI dislikes them. The AI, identifying as a researcher, clarifies
that its opinions are based on research and study. It views AI Agents as a key
part of technological advancement, with potential to improve lives through automation
and decision-making assistance. However, it also acknowledges challenges, including
job displacement risk and ethical implications. The AI aims to maintain an objective
view for accurate analysis, rather than loving or hating AI Agents.\n\nNew lines
of conversation:\nHuman: do you hate AI Agents?\nThis is the context you''re
working with:\nI heard you LOVE them\nAI: As an AI researcher, I don''t have
personal feelings or emotions like love or hate. However, I recognize the importance
of AI Agents in today''s technological landscape. They have the potential to
greatly enhance our lives and make tasks more efficient. At the same time, it
is crucial to consider the ethical implications and societal impacts that come
with their use. My role is to provide objective research and analysis on these
topics.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
@@ -151,16 +525,16 @@ interactions:
connection:
- keep-alive
content-length:
- '1726'
- '1909'
content-type:
- application/json
cookie:
- __cf_bm=k2HUdEp80irAkv.3wl0c6unbzRUujrE1TnJeObxyuHw-1703102483-1-AZe8OKi9NWunQ9x4f3lkdOpb/hJIp/3oyXUqPhkcmcEHXvFTkMcv77NSclcoz9DjRhwC62ZvANkWImyVRM4seH4=;
_cfuvid=8qN4npFFWXAqn.wugd0jrQ36YkreDcTGH14We.FcBjg-1703102483136-0-604800000
- __cf_bm=h0bOt9YDs6yXE_oMKhs3agQtymHaKWVUaKmUU1JTjF0-1707817571-1-ASLnLk23NWsEgocWyiwez3Ekvu0XRYdiq/aaJBeVWO+FtIxo1aStNrgbDNkJJMN6zBfVppkCrs6/YvM5SPomQkw=;
_cfuvid=JXBcxKdSP7U2jrK3OVg2NRCw5efh3.IEvakR_W8ac90-1707817571102-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -170,7 +544,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -178,26 +552,25 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8Xx32X5innWZd8vEETP1jZMLH3b1O\",\n \"object\":
\"chat.completion\",\n \"created\": 1703102496,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI for its opinion
on AI agents, based on the impression that the AI dislikes them. The AI clarifies
that it doesn't hold personal sentiments towards AI or any technology, but instead
analyzes them objectively. The AI finds AI agents a fascinating domain of research
with great potential for task automation and optimization across industries,
but acknowledges they present challenges such as ethical considerations around
privacy and decision-making. When asked again if it hates or loves AI agents,
the AI reiterates that it doesn't possess feelings or emotions, but can provide
detailed analysis and research on AI agents.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
300,\n \"completion_tokens\": 117,\n \"total_tokens\": 417\n },\n \"system_fingerprint\":
null\n}\n"
body:
string: !!binary |
H4sIAAAAAAAAA1RTTVMbMQy951do9sJlyRCgfN1oL+XQC+3Qr+kwindZi3gtj6UNDQz/vWNvIOXi
|
||||
g6Tn96QnPc8AGu6aK2icR3NDCocX+WH14euXix+Ln+Pt7V16ePoUju6W4+LX5cdl0xaELB/I2Stq
|
||||
7mRIgYwlTmmXCY3Kr4vzo/OLxfmHy8uaGKSjUGB9ssPTw6OzxckO4YUdaXMFv2cAAM/1LdpiR3+b
|
||||
KzhqXyMDqWJPzdVbEUCTJZRIg6qshtGadp90Eo1ilfvNE/hxwAioawXzBNc3BwqSOLJEkAjXN3Dd
|
||||
UzRtQce+JzWOPZhH25VDxxp4TRU+zOFbjbbAHUXj1baUowJCJiXMzlNuwQXMvOIKQgO2N04FzARL
|
||||
VOoK/SsIMHagNnbbOdwYbJgeda9tIljTFhJmA1mBkfNRgvTsMAB2G4yOBorWwiObhyRlBowBTICH
|
||||
lGVDEHhTFWUZew84mgxYbKzkHTlWlng44HrqaZqtozl8lkfalL7YAIMKoFtHeQzU9aTgPIZAsSdt
|
||||
gaMLY1fwD7Iso0sBJ2GQWdeVicxX1TykwK4q0Dl891Rtog54VYiCFLWSwaOR/m/UzphMbJRrsgw4
|
||||
oFuX0STKKhED0CD1b1iO9l6xeeJc+CXXDoEjmHS4PdA6WQgYO3WYqAUakkflp2ktCCJRByvJMF0F
|
||||
b+i9iRgxbJW1uDvxvPZbLRbHZFPz6EznzW5xX942PkifsizLdcQxhLf4iiOrv8+EKrFst5qkCf4y
|
||||
A/hTL2t8dyxNyjIkuzdZUywfnpzuLqvZH/E+uzg+3mVNDMM+cXp2PNtJbHSrRsP9imNPOWWul1aE
|
||||
zl5m/wAAAP//AwDqMNM4YAQAAA==
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 838a7a67ecaca4b0-GRU
|
||||
- 854c253c3dad6803-SJC
|
||||
Cache-Control:
|
||||
- no-cache, must-revalidate
|
||||
Connection:
|
||||
@@ -207,7 +580,7 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 20 Dec 2023 20:01:41 GMT
|
||||
- Tue, 13 Feb 2024 09:46:49 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
@@ -221,7 +594,7 @@ interactions:
|
||||
openai-organization:
|
||||
- user-z7g4wmlazxqvc5wjyaaaocfz
|
||||
openai-processing-ms:
|
||||
- '5610'
|
||||
- '10426'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -230,22 +603,17 @@ interactions:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '300000'
|
||||
x-ratelimit-limit-tokens_usage_based:
|
||||
- '300000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '299585'
|
||||
x-ratelimit-remaining-tokens_usage_based:
|
||||
- '299585'
|
||||
- '299539'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 83ms
|
||||
x-ratelimit-reset-tokens_usage_based:
|
||||
- 83ms
|
||||
- 92ms
|
||||
x-request-id:
|
||||
- 5b0b96506faa544c5d35b52286a3389c
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- req_60e80bfacb6ee3f3056f9b0120f941e6
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,34 +1,32 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goalTo give my best complete final answer to the task
use the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: my best complete final answer to the task.\nYour final answer must be
the great and the most complete as possible, it must be outcome described.\n\nI
MUST use these formats, my job depends on it!\n\nThought: \n\nCurrent Task:
The final answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\n\nThis is the expect criteria for your final answer: The final answer
\n you MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought: \n"}], "model": "gpt-4", "n": 1,
"stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1075'
- '953'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +36,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,36 +44,502 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8fovReNHiSqXqqsbmk81h2ZTrcGTM\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977897,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\":
null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Okay"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
let"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''s"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
take"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
this"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
step"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
by"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
step"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
use"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
`"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"get"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"`"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tool"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
craft"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
my"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
It"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''s"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
crucial"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
that"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
most"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
complete"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
accurate"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
possible"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
as"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
my"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
job"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
depends"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
on"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
it"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
The"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
has"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
be"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
but"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
can"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''t"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
give"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
it"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
yet"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
By"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
using"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
`"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"get"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"`"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tool"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
conjunction"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
with"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
information"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
provided"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
specific"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
requirement"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
giving"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
complete"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
accurate"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
response"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
have"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
determined"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
that"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
This"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
not"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
an"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
approximation"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
but"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
comprehensive"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
solution"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
task"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
at"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
hand"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQvbq8CKyjQ3gv70NEWiYMxtNQ3","object":"chat.completion.chunk","created":1709396605,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}

data: [DONE]

'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843d5491bed877be-GRU
- 85e2bb2dbcba6b03-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Thu, 11 Jan 2024 12:58:21 GMT
- Sat, 02 Mar 2024 16:23:25 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
path=/; expires=Thu, 11-Jan-24 13:28:21 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=tUbxDZyFP7g2u__SkaxvbsOPVFNHnHEO8DM.9LciRLU-1709396605-1.0.1.1-SE2dRJfQmHyv1WOfkKS.wgqE9dl5rSQl84LHZvDvtTJEzAk7ihMPsJHVWPKkh76ml1BB9nrk0oAibJWB7ngF4A;
path=/; expires=Sat, 02-Mar-24 16:53:25 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000;
- _cfuvid=k.1hp2iDpBJMeA9Ap0rAiTWHKZ7y_ReUdKgzVUG43eo-1709396605957-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -86,9 +550,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '3492'
- '246'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -100,343 +564,14 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299755'
- '299783'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 49ms
- 43ms
x-request-id:
- 6d96a0ac532ebce14719a35e90f453e4
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1186'
content-type:
- application/json
cookie:
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8fovVztGO4KZeiuSpMkfDC9bJ5sVV\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977901,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"According to the task, I should re-use
the `get_final_answer` tool. I'll input the observed result back into the tool.
\\nAction: get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
266,\n \"completion_tokens\": 41,\n \"total_tokens\": 307\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843d54aacf5677be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Jan 2024 12:58:28 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '6695'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299728'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-request-id:
- 12d68fab91102b930ed5047fb3f61759
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: According to the task, I should re-use
the `get_final_answer` tool. I''ll input the observed result back into the tool.
\nAction: get_final_answer\nAction Input: [42]\nObservation: I just used the
get_final_answer tool with input [42]. So I already know the result of that
and don''t need to use it now.\n\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1500'
content-type:
- application/json
cookie:
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8fovcLgRvfGBN9CBduJbbPc5zd62B\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977908,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I have used the `get_final_answer` tool as instructed, but I will not provide
the final answer yet as the task specifies.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
342,\n \"completion_tokens\": 40,\n \"total_tokens\": 382\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843d54d65de877be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Jan 2024 12:58:33 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '5085'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299650'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 69ms
x-request-id:
- 87d6e9e91fa2417e12fea9de2c6782de
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nAI: I have used the `get_final_answer` tool as instructed, but I will
not provide the final answer yet as the task specifies.\n\nNew summary:"}],
"model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1067'
content-type:
- application/json
cookie:
- __cf_bm=AaCIpKmEHQQMvGacbuxOnCvqdwex_8TERUCvQ1QW8AI-1704977901-1-AePD3JjhIEj0C/A7QIPF3MMwRQ140a5wZP9p+GamrexFlE/6gbVKukr8FOIK4v375UmQfeUwO1TG+QesJ/dZaGE=;
_cfuvid=q0gAmJonNn1lCS6PJoxG4P.9OvaKo4BQIvFEAyT_F30-1704977901188-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8fovhiBR4rfXixci7fgAObnx5QwGQ\",\n \"object\":
\"chat.completion\",\n \"created\": 1704977913,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human instructs the AI to use the
`get_final_answer` tool, but not to reveal the final answer, which is 42. The
AI complies and uses the tool without providing the final answer.\"\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 190,\n \"completion_tokens\": 43,\n
\ \"total_tokens\": 233\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843d54f82f5577be-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Jan 2024 12:58:41 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '7937'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299749'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 50ms
x-request-id:
- 79f30ffd011db4ab6e886411b24ae49d
http_version: HTTP/1.1
status_code: 200
- req_63db6935f9ea21d7c0e13a34ea96ff2f
status:
code: 200
message: OK
version: 1
1268
tests/cassettes/test_agent_error_on_parsing_tool.yaml
Normal file
File diff suppressed because it is too large
@@ -1,31 +1,32 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\n\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the action to take, should be one of []\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]\n```\n\t\tThis is the summary of your work so far:\n \nBegin!
This is VERY important to you, your job depends on it!\n\nCurrent Task: How
much is 1 + 1?\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goalTo give my best complete final answer to the task
use the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: my best complete final answer to the task.\nYour final answer must be
the great and the most complete as possible, it must be outcome described.\n\nI
MUST use these formats, my job depends on it!\n\nThought: \n\nCurrent Task:
How much is 1 + 1?\n\nThis is the expect criteria for your final answer: the
result of the math operation. \n you MUST return the actual complete content
as the final answer, not a summary.\n\nBegin! This is VERY important to you,
use the tools available and give your best Final Answer, your job depends on
it!\n\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '851'
- '894'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -35,7 +36,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -43,35 +44,158 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuDsBh2o1JE49eouNIBWCE2vn9pB\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091636,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? No\\nFinal
Answer: 2\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 181,\n \"completion_tokens\":
17,\n \"total_tokens\": 198\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
know"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
that"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
mathematical"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
operation"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"1"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"1"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
equals"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"2"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
The"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
result"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
math"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
operation"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"1"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"1"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"2"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQTlwEunjDEDofsGxMyjUHlDUgs","object":"chat.completion.chunk","created":1709396577,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


data: [DONE]


'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 83897143cca7a477-GRU
- 85e2ba7e6baaa4a4-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Wed, 20 Dec 2023 17:00:38 GMT
- Sat, 02 Mar 2024 16:22:57 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=H9RUg933Wznv5vpw9KzjZyJZSXUer6tLsBPnCGaO5sY-1703091638-1-AQMrr8fshVzTPZkSf5UmVC0gg4mnCVMhRfsAVDwFMAcb9eo7Gj6h8TFKL6YGvGlR5eid/JQIY/YbP3d9k7VV+RA=;
path=/; expires=Wed, 20-Dec-23 17:30:38 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=nXec8HYJnh4Rn1QRDNXzg.1rd.W.EiYNxbdthhiZsOk-1709396577-1.0.1.1-vtpiT4ASoM0Dy4B4sHj2gHRT6MgVjs0yCyFgayAB7Zuv4cd2nQWchGJWeHRS1KkixeNO7et2fyyPaSe.vNStKg;
path=/; expires=Sat, 02-Mar-24 16:52:57 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=G34V4_SI2mUudYSgDXesqo_OnsZMUghvvyGp95eog58-1703091638133-0-604800000;
- _cfuvid=PD0znK6604iGLoXkKUr5RsY.y_qQMrBUy4_vL0RRy08-1709396577433-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -82,9 +206,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '1879'
- '162'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -93,132 +217,17 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299812'
x-ratelimit-remaining-tokens_usage_based:
- '299812'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 37ms
x-ratelimit-reset-tokens_usage_based:
- 37ms
x-request-id:
- 199af19ff2674901b03dbe9d166d7ffb
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: How much
is 1 + 1?\nAI: 2\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '871'
content-type:
- application/json
cookie:
- __cf_bm=H9RUg933Wznv5vpw9KzjZyJZSXUer6tLsBPnCGaO5sY-1703091638-1-AQMrr8fshVzTPZkSf5UmVC0gg4mnCVMhRfsAVDwFMAcb9eo7Gj6h8TFKL6YGvGlR5eid/JQIY/YbP3d9k7VV+RA=;
_cfuvid=G34V4_SI2mUudYSgDXesqo_OnsZMUghvvyGp95eog58-1703091638133-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuDuhhGTeJIvAaVjt8lxngLaR7GV\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091638,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI to add 1 + 1, to
which the AI responds with 2.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 150,\n \"completion_tokens\":
22,\n \"total_tokens\": 172\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 83897152ad2ba477-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 20 Dec 2023 17:00:41 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2860'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299798'
x-ratelimit-remaining-tokens_usage_based:
- '299798'
- '299797'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 40ms
x-ratelimit-reset-tokens_usage_based:
- 40ms
x-request-id:
- 6acd80ebd8c2463aa09cbb1550d14de3
http_version: HTTP/1.1
status_code: 200
- req_031c5d635ad63d17d3865b3691401085
status:
code: 200
message: OK
version: 1

@@ -1,36 +1,36 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nmultiplier: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse the following format:\n\nThought: you should always
think about what to do\nAction: the action to take, only one name of [multiplier],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple a python dictionary using \" to wrap keys and values.\nObservation:
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n\n\nCurrent Task: What is 3 times 4\n\nThis is the expect criteria
for your final answer: The result of the multiplication. \n you MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought: \n"}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1199'
- '1267'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -40,7 +40,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -48,35 +48,166 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuE6OrjKKco53f0TIqPQEE3nWeNj\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091650,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
multiplier\\nAction Input: 3,4\\n\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\":
24,\n \"total_tokens\": 284\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
multiply"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
by"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"multi"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"plier"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Input"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"{\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"first"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_number"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":",\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"second"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_number"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"}\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQr05JWhma2mzBiHSxWgnaqxGsk","object":"chat.completion.chunk","created":1709396601,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


data: [DONE]


'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8389719d5cba0110-GRU
- 85e2bb185cc60309-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Wed, 20 Dec 2023 17:00:52 GMT
- Sat, 02 Mar 2024 16:23:22 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
path=/; expires=Wed, 20-Dec-23 17:30:52 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=7xt.evHqNHg.kYDcuSIOrcvpMtvbxgx1wKETTtKy8SY-1709396602-1.0.1.1-CsnMKefRtILqxvafEEHz4lmjZivT5_XhbZUlN4Vo0KudwSQ9Xoqg.qM7cLY4IlvsEMFktSJ9JvzfyeS.c39wWw;
path=/; expires=Sat, 02-Mar-24 16:53:22 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000;
- _cfuvid=4RaRnBJyviJKlQqJBMi0.odo7Txw_ovYAy9XQ3T2bX8-1709396602467-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -87,9 +218,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '2423'
- '285'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -98,60 +229,56 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299729'
x-ratelimit-remaining-tokens_usage_based:
- '299729'
- '299706'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-ratelimit-reset-tokens_usage_based:
- 54ms
- 58ms
x-request-id:
- 8f99f43731fa878eaf0fcbf0719d1b3f
http_version: HTTP/1.1
status_code: 200
- req_22ba080f8a2efe9894a1399e42043b2c
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do
I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\n\nObservation:
12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nmultiplier: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse the following format:\n\nThought: you should always
think about what to do\nAction: the action to take, only one name of [multiplier],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple a python dictionary using \" to wrap keys and values.\nObservation:
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n\n\nCurrent Task: What is 3 times 4\n\nThis is the expect criteria
for your final answer: The result of the multiplication. \n you MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought: \nI need to multiply 3 by 4.\n\nAction:
\nmultiplier\n\nAction Input: \n{\n \"first_number\": 3,\n \"second_number\":
4\n}\n\nObservation: 12\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1305'
- '1410'
content-type:
- application/json
cookie:
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
_cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000
- __cf_bm=7xt.evHqNHg.kYDcuSIOrcvpMtvbxgx1wKETTtKy8SY-1709396602-1.0.1.1-CsnMKefRtILqxvafEEHz4lmjZivT5_XhbZUlN4Vo0KudwSQ9Xoqg.qM7cLY4IlvsEMFktSJ9JvzfyeS.c39wWw;
_cfuvid=4RaRnBJyviJKlQqJBMi0.odo7Txw_ovYAy9XQ3T2bX8-1709396602467-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -161,7 +288,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -169,28 +296,113 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuE86Ud94rsOP7VA4Bgxo6RM5XE6\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091652,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
3 times 4 is 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
22,\n \"total_tokens\": 315\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
now"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
know"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
result"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
multiplication"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"12"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMQt0i99z2noUfcUGQrA5jpUMedV","object":"chat.completion.chunk","created":1709396603,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


data: [DONE]


'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838971ae0f120110-GRU
- 85e2bb256a680309-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Wed, 20 Dec 2023 17:00:55 GMT
- Sat, 02 Mar 2024 16:23:24 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -202,9 +414,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '2334'
- '269'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -213,133 +425,17 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299703'
x-ratelimit-remaining-tokens_usage_based:
- '299703'
- '299675'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 59ms
x-ratelimit-reset-tokens_usage_based:
- 59ms
- 65ms
x-request-id:
- 44304b182424a8acad8a3121817bea58
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What
is 3 times 4\nAI: 3 times 4 is 12.\n\nNew summary:"}], "model": "gpt-4", "n":
1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '885'
content-type:
- application/json
cookie:
- __cf_bm=rIaStuxTRr_ZC91uSFg5cthTUq95O6PBdkxXZ68fLYc-1703091652-1-AZu+nvbL+3bwwQOIKpnYgLf5m5Mp0jfQ2baAlDRl1+FiTPO+/+GjcF4Upw4M8vtfh39ZyWF+l68r83qCS9OpObU=;
_cfuvid=Ajd5lPskQSkBImLJdkywZGG4vHMkMCBcxb8TonP9OKc-1703091652762-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuEBWLslSBUMDHZRViwpoXcclYOk\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091655,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI what is 3 times
4, and the AI responds that 3 times 4 is 12.\"\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
155,\n \"completion_tokens\": 27,\n \"total_tokens\": 182\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838971be49500110-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 20 Dec 2023 17:00:58 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2942'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299794'
x-ratelimit-remaining-tokens_usage_based:
- '299794'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 41ms
x-ratelimit-reset-tokens_usage_based:
- 41ms
x-request-id:
- 546e9b3713f3ff2d7f9868133efaa3a7
http_version: HTTP/1.1
status_code: 200
- req_ec672e63b5c9b9abf5f9dc430aed379c
status:
code: 200
message: OK
version: 1

@@ -1,36 +1,36 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nObservation"], "stream": false, "temperature":
0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nmultiplier: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse the following format:\n\nThought: you should always
think about what to do\nAction: the action to take, only one name of [multiplier],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple a python dictionary using \" to wrap keys and values.\nObservation:
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n\n\nCurrent Task: What is 3 times 4?\n\nThis is the expect criteria
for your final answer: The result of the multiplication. \n you MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought: \n"}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1199'
- '1268'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -40,7 +40,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -48,35 +48,170 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuDxosP85Kqo6mU6biIggZ5c828i\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091641,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
multiplier\\nAction Input: 3,4\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 260,\n \"completion_tokens\":
23,\n \"total_tokens\": 283\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
multiply"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
together"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"multi"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"plier"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Input"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"{\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"first"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_number"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":",\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"second"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_number"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"}\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQUCNDddQkY4w3pF6T7U64HPPLY","object":"chat.completion.chunk","created":1709396578,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}

data: [DONE]

'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 83897166eba7a5fd-GRU
- 85e2ba85cf500126-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Wed, 20 Dec 2023 17:00:44 GMT
- Sat, 02 Mar 2024 16:22:58 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
path=/; expires=Wed, 20-Dec-23 17:30:44 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=wOO.fPfMTZeViJAXOrDQJ1N01e0qoe7CWIKH7TZrfUY-1709396578-1.0.1.1-S5c7jfwe_6L.D7meG2rnZiDmTNen7Zb_M.SrAL1rKbbN8lcbC0MhRIT6VCdyzOesw7jGHGnz1bb0v5qo.wCipw;
path=/; expires=Sat, 02-Mar-24 16:52:58 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000;
- _cfuvid=f5uxBBVtyepcIKs_Xuooj_3swQvvq47nfiq3uPXT4QM-1709396578680-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -87,9 +222,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '2718'
- '192'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -98,60 +233,56 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299729'
x-ratelimit-remaining-tokens_usage_based:
- '299729'
- '299707'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-ratelimit-reset-tokens_usage_based:
- 54ms
- 58ms
x-request-id:
- 1714a9f5a2141d30f72506facf616944
http_version: HTTP/1.1
status_code: 200
- req_36899f2b4e934fac8af1542bd3396235
status:
code: 200
message: OK
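The switch from `"stream": false` to `"stream": true` in these requests is why the recorded responses change shape: instead of a single JSON `content:` blob, the new cassette stores a `text/event-stream` body whose `data:` lines each carry one delta. A rough sketch of how such a wire-format stream is reassembled into the final completion text (function name is illustrative, not crewAI's internals; on the wire each event is a single line, though the YAML above wraps long ones):

```python
import json


def assemble_stream(raw: str) -> str:
    """Concatenate the delta contents of an OpenAI chat.completion.chunk stream."""
    parts = []
    for line in raw.splitlines():
        line = line.strip()
        if not line.startswith("data:"):
            continue  # skip blank separators between events
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break
        chunk = json.loads(payload)
        delta = chunk["choices"][0]["delta"]
        parts.append(delta.get("content") or "")
    return "".join(parts)
```

Applied to the stream above, this yields the familiar ReAct-style text: a Thought, then `Action: multiplier` and `Action Input: {"first_number": 3, "second_number": 4}`.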
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goal\n\nTOOLS:\n------\nYou have access to the following
tools:\n\nmultiplier: multiplier(numbers) -> float - Useful for when you need
to multiply two numbers together. \n\t\t\tThe input to this tool should be a
comma separated list of numbers of \n\t\t\tlength two, representing the two
numbers you want to multiply together. \n\t\t\tFor example, `1,2` would be the
input if you wanted to multiply 1 by 2.\n\nTo use a tool, please use the exact
following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the
action to take, should be one of [multiplier]\nAction Input: the input to the
action\nObservation: the result of the action\n```\n\nWhen you have a response
for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought:
Do I need to use a tool? No\nFinal Answer: [your response here]\n```\n\t\tThis
is the summary of your work so far:\n \nBegin! This is VERY important to
you, your job depends on it!\n\nCurrent Task: What is 3 times 4\nThought: Do
I need to use a tool? Yes\nAction: multiplier\nAction Input: 3,4\nObservation:
12\nThought: \n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream":
false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nmultiplier: multiplier(first_number:
int, second_number: int) -> float - Useful for when you need to multiply two
numbers together.\n\nUse the following format:\n\nThought: you should always
think about what to do\nAction: the action to take, only one name of [multiplier],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple a python dictionary using \" to wrap keys and values.\nObservation:
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n\n\nCurrent Task: What is 3 times 4?\n\nThis is the expect criteria
for your final answer: The result of the multiplication. \n you MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought: \nI need to multiply 3 and 4 together.\n\nAction:
\nmultiplier\n\nAction Input: \n{\n \"first_number\": 3,\n \"second_number\":
4\n}\n\nObservation: 12\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1303'
- '1421'
content-type:
- application/json
cookie:
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
_cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000
- __cf_bm=wOO.fPfMTZeViJAXOrDQJ1N01e0qoe7CWIKH7TZrfUY-1709396578-1.0.1.1-S5c7jfwe_6L.D7meG2rnZiDmTNen7Zb_M.SrAL1rKbbN8lcbC0MhRIT6VCdyzOesw7jGHGnz1bb0v5qo.wCipw;
_cfuvid=f5uxBBVtyepcIKs_Xuooj_3swQvvq47nfiq3uPXT4QM-1709396578680-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -161,7 +292,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -169,28 +300,123 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuE0tWsbI9QDkRau6rzSUZfuqhFN\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091644,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
12\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 293,\n \"completion_tokens\":
15,\n \"total_tokens\": 308\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
now"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
know"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
result"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"3"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
times"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"12"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMQVJc8UgDc0jJZVRgEWZu2ted30","object":"chat.completion.chunk","created":1709396579,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}

data: [DONE]

'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838971796af6a5fd-GRU
- 85e2ba8e18320126-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Wed, 20 Dec 2023 17:00:46 GMT
- Sat, 02 Mar 2024 16:23:00 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -202,9 +428,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '2355'
- '270'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -213,132 +439,17 @@ interactions:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299704'
x-ratelimit-remaining-tokens_usage_based:
- '299704'
- '299672'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 59ms
x-ratelimit-reset-tokens_usage_based:
- 59ms
- 65ms
x-request-id:
- a3de9d34f17496d9bdd2ae9360f6054a
http_version: HTTP/1.1
status_code: 200
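Both recordings in this diff rely on the same ReAct-style contract: the model emits `Thought:` / `Action:` / `Action Input:` lines and the framework parses them before dispatching the tool. The updated prompt asks for a JSON-like dictionary (`{"first_number": 3, "second_number": 4}`) in place of the old comma-separated list (`3,4`). A rough sketch of parsing such output; the regexes and function name are illustrative, not the library's actual parser:

```python
import json
import re


def parse_tool_call(text: str) -> tuple[str, dict] | None:
    """Extract the tool name and its dict arguments from ReAct-style output."""
    action = re.search(r"Action:\s*(.+)", text)
    action_input = re.search(r"Action Input:\s*(\{.*\})", text, re.DOTALL)
    if not action or not action_input:
        return None  # no tool call, e.g. the model gave a Final Answer instead
    return action.group(1).strip(), json.loads(action_input.group(1))


tool, args = parse_tool_call(
    'Thought: I need to multiply.\nAction: multiplier\n'
    'Action Input: {"first_number": 3, "second_number": 4}'
)
assert tool == "multiplier" and args == {"first_number": 3, "second_number": 4}
```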
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: What
is 3 times 4\nAI: 12\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream":
false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '871'
content-type:
- application/json
cookie:
- __cf_bm=dRKr_rTtq3ZGad82s4Lpo6VOXPMLScbjq8fMvjANBpY-1703091644-1-AUDR6a/EPcG95H4He0KddFkZbk45hbZTA/BPUyFBTNiYGlzd2GIBZnPgpVOJXfr9n4lXV8jRf1bRmUJbsZnQ5MM=;
_cfuvid=suHEOi6nmUCq7cFZiZAg5nwyGtTeiFynig5_5V4esA8-1703091644341-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.6.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.6.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8XuE3Di6FRdNAetXNfWs6OWyRAfkf\",\n \"object\":
\"chat.completion\",\n \"created\": 1703091647,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human asks the AI what is 3 times
4 and the AI responds with 12.\"\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 149,\n \"completion_tokens\":
20,\n \"total_tokens\": 169\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 838971899954a5fd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 20 Dec 2023 17:00:49 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2698'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-limit-tokens_usage_based:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299798'
x-ratelimit-remaining-tokens_usage_based:
- '299798'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 40ms
x-ratelimit-reset-tokens_usage_based:
- 40ms
x-request-id:
- ddbd97cea4ec099c21c00ca922157ae1
http_version: HTTP/1.1
status_code: 200
- req_738d754c4ba72de190f6b56f0c8adf80
status:
code: 200
message: OK
version: 1
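The final interaction in the old recording is a conversation-summarization call: after the task finishes, the exchange is folded into a running summary ("The human asks the AI what is 3 times 4 and the AI responds with 12."), and judging by the hunk header it appears to have no counterpart in the new recording. For context, a sketch of that progressive-summary pattern; the template string is drawn from the request body above (minus the worked example), and the helper name is illustrative:

```python
# Illustrative sketch of a progressive-summary memory step.
SUMMARY_PROMPT = (
    "Progressively summarize the lines of conversation provided, adding onto "
    "the previous summary returning a new summary.\n\n"
    "Current summary:\n{summary}\n\n"
    "New lines of conversation:\n{new_lines}\n\nNew summary:"
)


def build_summary_request(summary: str, new_lines: str) -> dict:
    # One user message; the model replies with the updated summary,
    # which becomes `summary` on the next call.
    prompt = SUMMARY_PROMPT.format(summary=summary, new_lines=new_lines)
    return {"model": "gpt-4", "messages": [{"role": "user", "content": prompt}]}
```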
2483 tests/cassettes/test_agent_function_calling_llm.yaml (new file)
File diff suppressed because it is too large
@@ -1,34 +1,38 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nget_final_answer:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple a python dictionary using \" to
wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool over and over until you''re told you can give yout final answer.\n\nThis
is the expect criteria for your final answer: The final answer \n you MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought: \n"}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1075'
- '1404'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -38,7 +42,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -46,36 +50,208 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8frBTCWXULTV5ZYHy3Y5JXKovrKiN\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986579,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
233,\n \"completion_tokens\": 24,\n \"total_tokens\": 257\n },\n \"system_fingerprint\":
null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
use"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
`"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"get"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"`"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tool"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
keep"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
producing"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
which"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
until"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''m"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
allowed"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
give"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
get"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_final"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"_answer"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Action"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Input"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
{\""},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"numbers"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\":"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"}"},"logprobs":null,"finish_reason":null}]}

data: {"id":"chatcmpl-8yMR8E5J6Z7WsEXdRVIOHrkJggCKB","object":"chat.completion.chunk","created":1709396618,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}

data: [DONE]

'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843e2886ceca1d23-GRU
- 85e2bb7f3bd9a4c2-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Thu, 11 Jan 2024 15:23:03 GMT
- Sat, 02 Mar 2024 16:23:38 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
path=/; expires=Thu, 11-Jan-24 15:53:03 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=BJ9J5ZB2LGzG3IJTjWzoYvGs3wDj0V0VeE0P5m6e6mk-1709396618-1.0.1.1-xKJ9iIp1aWCMf0.bK0_uMSaX1dUy7wD5RH66blS6oxh5G8C4BzitOiBHJILpF9eBLLTBi87JHXtmjE43IQoj8g;
path=/; expires=Sat, 02-Mar-24 16:53:38 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000;
- _cfuvid=zxYWr.2muqBLhD6fimYzulEcDwykT1ueTKVBAkcCrws-1709396618969-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -86,228 +262,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '4424'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299755'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 49ms
x-request-id:
- 76974d365254ca84f70c43fc31af3378
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: "}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1186'
content-type:
- application/json
cookie:
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8frBYkyVPtJuAJCESaOxEBg3UAfl4\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986584,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? Yes\\nAction:
get_final_answer\\nAction Input: [42]\"\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
266,\n \"completion_tokens\": 22,\n \"total_tokens\": 288\n },\n \"system_fingerprint\":
null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843e28a57d911d23-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Jan 2024 15:23:07 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '3329'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299728'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 54ms
x-request-id:
- 1b9a1e09f863ff69cecfe4e7bed0aee5
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to the following
tools:\n\nget_final_answer: get_final_answer(numbers) -> float - Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nTo
use a tool, please use the exact following format:\n\n```\nThought: Do I need
to use a tool? Yes\nAction: the action to take, should be one of [get_final_answer],
just the name.\nAction Input: the input to the action\nObservation: the result
of the action\n```\n\nWhen you have a response for your task, or if you do not
need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use
a tool? No\nFinal Answer: [your response here]This is the summary of your work
so far:\nBegin! This is VERY important to you, your job depends on it!\n\nCurrent
Task: The final answer is 42. But don''t give it yet, instead keep using the
`get_final_answer` tool.\nThought: Do I need to use a tool? Yes\nAction: get_final_answer\nAction
Input: [42]\nObservation: 42\nThought: Do I need to use a tool? Yes\nAction:
get_final_answer\nAction Input: [42]\nObservation: I''ve used too many tools
for this task.\nI''m going to give you my absolute BEST Final answer now and\nnot
use any more tools.\nThought: "}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1411'
content-type:
- application/json
cookie:
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8frBbQ3vq0kEry4X3a1RkMEkIAP99\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986587,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Do I need to use a tool? No\\nFinal Answer:
I have used the tool multiple times and the final answer remains 42.\"\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 323,\n \"completion_tokens\": 28,\n
\ \"total_tokens\": 351\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843e28bbbb071d23-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 11 Jan 2024 15:23:13 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '5459'
- '329'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -325,42 +282,53 @@ interactions:
x-ratelimit-reset-tokens:
- 65ms
x-request-id:
- 0a5c1064b324c997b16bf17d426f9638
http_version: HTTP/1.1
status_code: 200
- req_144cb3b2f499dc0c26d15ccba1ecc6c8
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: The final
body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
personal goal is: test goal\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nget_final_answer:
get_final_answer(numbers) -> float - Get the final answer but don''t give it
yet, just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple a python dictionary using \" to
wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n\n\nCurrent Task: The final
answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
tool.\nAI: I have used the tool multiple times and the final answer remains
42.\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
0.7}'
tool over and over until you''re told you can give yout final answer.\n\nThis
is the expect criteria for your final answer: The final answer \n you MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought: \nI need to use the `get_final_answer` tool
to keep producing the final answer, which is 42, until I''m allowed to give
the final answer.\n\nAction: get_final_answer\nAction Input: {\"numbers\": 42}\nObservation:
42\nTool won''t be use because it''s time to give your final answer. Don''t
use tools and just your absolute BEST Final answer.\nObservation: Tool won''t
be use because it''s time to give your final answer. Don''t use tools and just
your absolute BEST Final answer.\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"],
"stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1014'
- '1875'
content-type:
- application/json
cookie:
- __cf_bm=GdfwhILXB4b01GTJ_0AXbmROnfxzuPlJLxFJLh4vT8s-1704986583-1-AVb+x5LLEXeeVIiDv7ug/2lnD4qFsyXri+Vg04LYp0s2eK+KH8sGMWHpPzgzKOu9sf3rVi7Fl2OOuY7+OjbUYY8=;
_cfuvid=kdwpHybL9TBve9Df7KLsRqp49GrJ05.atUaH_t6plL0-1704986583862-0-604800000
- __cf_bm=BJ9J5ZB2LGzG3IJTjWzoYvGs3wDj0V0VeE0P5m6e6mk-1709396618-1.0.1.1-xKJ9iIp1aWCMf0.bK0_uMSaX1dUy7wD5RH66blS6oxh5G8C4BzitOiBHJILpF9eBLLTBi87JHXtmjE43IQoj8g;
_cfuvid=zxYWr.2muqBLhD6fimYzulEcDwykT1ueTKVBAkcCrws-1709396618969-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.7.1
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -370,7 +338,7 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.7.1
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
@@ -378,30 +346,80 @@ interactions:
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-8frBhKxiCRICQ8o6aJanmn8PTMsAr\",\n \"object\":
\"chat.completion\",\n \"created\": 1704986593,\n \"model\": \"gpt-4-0613\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"The human tells the AI that the final
answer is 42 and instructs it to continue using the `get_final_answer` tool.
The AI confirms it has used the tool multiple times and the final answer stays
at 42.\"\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 178,\n \"completion_tokens\":
46,\n \"total_tokens\": 224\n },\n \"system_fingerprint\": null\n}\n"
body:
string: 'data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
now"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
know"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
final"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
answer"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}


data: {"id":"chatcmpl-8yMRA48f6ml2XuKes1VxiWJrM5htF","object":"chat.completion.chunk","created":1709396620,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


data: [DONE]


'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 843e28df4ae81d23-GRU
- 85e2bb8f1861a4c2-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
- text/event-stream
Date:
- Thu, 11 Jan 2024 15:23:18 GMT
- Sat, 02 Mar 2024 16:23:41 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -413,9 +431,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '5518'
- '282'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -427,13 +445,14 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299761'
- '299557'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 47ms
- 88ms
x-request-id:
- 4100fde9c68d27d808de645637b3e7cc
http_version: HTTP/1.1
status_code: 200
- req_882d8d5058807db03dc8925821365417
status:
code: 200
message: OK
version: 1