Compare commits

...

18 Commits

Author SHA1 Message Date
João Moura
1f802ccb5a removing logs and preping new version 2024-02-28 03:44:23 -03:00
João Moura
e1306a8e6a removing necessary crewai-tools dependency 2024-02-28 03:44:23 -03:00
João Moura
997c906b5f adding support for input interpolation for tasks and agents 2024-02-28 03:44:23 -03:00
João Moura
2530196cf8 fixing tests 2024-02-28 03:44:23 -03:00
João Moura
340bea3271 Adding ability to track tools_errors and delegations 2024-02-28 03:44:23 -03:00
João Moura
3df3bba756 changing method naming to increment 2024-02-28 03:44:23 -03:00
João Moura
a9863fe670 Adding overall usage_metrics to crew and not adding delegation tools if there no agents the allow delegation 2024-02-28 03:44:23 -03:00
João Moura
7b49b4e985 Adding initial formatting error counting and token counter 2024-02-28 03:44:23 -03:00
João Moura
577db88f8e Updating README 2024-02-28 03:44:23 -03:00
João Moura
01a2e650a4 Adding write job description example 2024-02-28 03:44:23 -03:00
BR
cd9f7931c9 Fix Creating-a-Crew-and-kick-it-off.md so it can run (#280)
* Fix Creating-a-Crew-and-kick-it-off.md

- Update deps to include `crewai[tools]`
- Remove invalid `max_inter` arg from Task constructor call

* Update Creating-a-Crew-and-kick-it-off.md

---------

Co-authored-by: João Moura <joaomdmoura@gmail.com>
2024-02-27 14:23:19 -03:00
João Moura
2b04ae4e4a updating docs 2024-02-26 15:54:06 -03:00
João Moura
cd0b82e794 Cutting new version removing crewai-tool as a mandatory dependency 2024-02-26 15:27:04 -03:00
João Moura
0ddcffe601 updating telemetry timeout 2024-02-26 13:40:41 -03:00
João Moura
712d106a44 updating docs 2024-02-26 13:38:14 -03:00
João Moura
34c5560cb0 updating telemetry code and gitignore 2024-02-24 16:18:26 -03:00
João Moura
dcba1488a6 make agents not have a memory by default 2024-02-24 03:33:05 -03:00
João Moura
8e4b156f11 preparing new version 2024-02-24 03:30:12 -03:00
46 changed files with 30122 additions and 30873 deletions

3
.gitignore vendored
View File

@@ -6,4 +6,5 @@ dist/
assets/*
.idea
test/
docs_crew/
docs_crew/
chroma.sqlite3

View File

@@ -24,6 +24,7 @@
- [Key Features](#key-features)
- [Examples](#examples)
- [Quick Tutorial](#quick-tutorial)
- [Write Job Descriptions](#write-job-descriptions)
- [Trip Planner](#trip-planner)
- [Stock Analysis](#stock-analysis)
- [Connecting Your Crew to a Model](#connecting-your-crew-to-a-model)
@@ -48,6 +49,12 @@ To get started with CrewAI, follow these simple steps:
pip install crewai
```
If you want to also install crewai-tools, which is a package with tools that can be used by the agents, but with more dependencies, you can install it with:
```shell
pip install 'crewai[tools]'
```
The example below also uses DuckDuckGo's Search. You can install it with `pip` too:
```shell
@@ -62,9 +69,10 @@ from crewai import Agent, Task, Crew, Process
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
# You can choose to use a local model through Ollama for example. See ./docs/how-to/llm-connections.md for more information.
# from langchain_community.llms import Ollama
# ollama_llm = Ollama(model="openhermes")
# You can choose to use a local model through Ollama for example. See https://docs.crewai.com/how-to/LLM-Connections/ for more information.
# OPENAI_API_BASE='http://localhost:11434/v1'
# OPENAI_MODEL_NAME='openhermes' # Adjust based on available model
# OPENAI_API_KEY=''
# Install duckduckgo-search for this example:
# !pip install -U duckduckgo-search
@@ -84,12 +92,12 @@ researcher = Agent(
tools=[search_tool]
# You can pass an optional llm attribute specifying what model you wanna use.
# It can be a local model through Ollama / LM Studio or a remote
# model like OpenAI, Mistral, Antrophic or others (https://python.langchain.com/docs/integrations/llms/)
# model like OpenAI, Mistral, Anthropic or others (https://docs.crewai.com/how-to/LLM-Connections/)
#
# Examples:
# import os
# os.environ['OPENAI_MODEL_NAME'] = 'gpt-3.5-turbo'
#
# from langchain_community.llms import Ollama
# llm=ollama_llm # was defined above in the file
# OR
#
# from langchain_openai import ChatOpenAI
# llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
@@ -100,15 +108,14 @@ writer = Agent(
backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
You transform complex concepts into compelling narratives.""",
verbose=True,
allow_delegation=True,
# (optional) llm=ollama_llm
allow_delegation=True
)
# Create tasks for your agents
task1 = Task(
description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
Identify key trends, breakthrough technologies, and potential industry impacts.
Your final answer MUST be a full analysis report""",
Identify key trends, breakthrough technologies, and potential industry impacts.""",
expected_output="Full analysis report in bullet points",
agent=researcher
)
@@ -116,8 +123,8 @@ task2 = Task(
description="""Using the insights provided, develop an engaging blog
post that highlights the most significant AI advancements.
Your post should be informative yet accessible, catering to a tech-savvy audience.
Make it sound cool, avoid complex words so it doesn't sound like AI.
Your final answer MUST be the full blog post of at least 4 paragraphs.""",
Make it sound cool, avoid complex words so it doesn't sound like AI.""",
expected_output="Full blog post of at least 4 paragraphs",
agent=writer
)
@@ -143,6 +150,8 @@ In addition to the sequential process, you can use the hierarchical process, whi
- **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enhancing problem-solving efficiency.
- **Flexible Task Management**: Define tasks with customizable tools and assign them to agents dynamically.
- **Processes Driven**: Currently only supports `sequential` task execution and `hierarchical` processes, but more complex processes like consensual and autonomous are being worked on.
- **Save output as file**: Save the output of individual tasks as a file, so you can use it later.
- **Parse output as Pydantic or Json**: Parse the output of individual tasks as a Pydantic model or as a Json if you want to.
- **Works with Open Source Models**: Run your crew using Open AI or open source models refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models, even ones running locally!
![CrewAI Mind Map](./docs/crewAI-mindmap.png "CrewAI Mind Map")
@@ -160,6 +169,12 @@ You can test different real life examples of AI crews in the [crewAI-examples re
[![CrewAI Tutorial](https://img.youtube.com/vi/tnejrr-0a94/maxresdefault.jpg)](https://www.youtube.com/watch?v=tnejrr-0a94 "CrewAI Tutorial")
### Write Job Descriptions
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/job-posting) or watch a video below:
[![Jobs postings](https://img.youtube.com/vi/u98wEMz-9to/maxresdefault.jpg)](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")
### Trip Planner
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner) or watch a video below:
@@ -180,7 +195,7 @@ Please refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-
## How CrewAI Compares
- **Autogen**: While Autogen excels in creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.
- **Autogen**: While Autogen does good in creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.
- **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.

View File

@@ -15,26 +15,27 @@ description: What are crewAI Agents and how to use them.
## Agent Attributes
| Attribute | Description |
| :---------- | :----------------------------------- |
| **Role** | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
| **Goal** | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
| **Backstory** | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
| **LLM** | The language model used by the agent to process and generate text. |
| **Tools** | Set of capabilities or functions that the agent can use to perform tasks. Tools can be shared or exclusive to specific agents. |
| **Function Calling LLM** | The language model used by this agent to call functions, if none is passed the same main llm for each agent will be used. |
| **Max Iter** | The maximum number of iterations the agent can perform before forced to give its best answer |
| **Max RPM** | The maximum number of requests per minute the agent can perform to avoid rate limits |
| **Verbose** | This allow you to actually see what is going on during the Crew execution. |
| **Allow Delegation** | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. |
| **Step Callback** | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback` |
| Attribute | Description |
| :------------------ | :----------------------------------- |
| **Role** | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
| **Goal** | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
| **Backstory** | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
| **LLM** | The language model used by the agent to process and generate text. Defaults to using OpenAI's GPT-4 (`ChatOpenAI`), unless another model is specified through the environment variable "OPENAI_MODEL_NAME". |
| **Tools** | Set of capabilities or functions that the agent can use to perform tasks. Tools can be shared or exclusive to specific agents. It's an attribute that can be set during the initialization of an agent. |
| **Function Calling LLM** | The language model used by this agent to call functions. It is an optional field and, if not provided, the behavior of defaulting to the main `llm` is implicit. |
| **Max Iter** | The maximum number of iterations the agent can perform before being forced to give its best answer. Default is `15`. |
| **Max RPM** | The maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified. |
| **Verbose** | Enables detailed logging of the agent's execution for debugging or monitoring purposes when set to True. Default is `False` |
| **Allow Delegation**| Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. |
| **Step Callback** | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. |
| **Memory** | Indicates whether the agent should have memory or not, with a default value of False. This impacts the agent's ability to remember past interactions. Default is `False` |
## Creating an Agent
!!! note "Agent Interaction"
Agents can interact with each other using the CrewAI's built-in delegation and communication mechanisms.<br/>This allows for dynamic task management and problem-solving within the crew.
To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example:
To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example including all attributes:
```python
# Example: Creating an agent with all attributes
@@ -48,14 +49,15 @@ agent = Agent(
to the business.
You're currently working on a project to analyze the
performance of our marketing campaigns.""",
tools=[my_tool1, my_tool2],
llm=my_llm,
function_calling_llm=my_llm,
max_iter=10,
max_rpm=10,
verbose=True,
allow_delegation=True,
step_callback=my_intermediate_step_callback
tools=[my_tool1, my_tool2], # Optional
llm=my_llm, # Optional
function_calling_llm=my_llm, # Optional
max_iter=15, # Optional
max_rpm=None, # Optional
verbose=True, # Optional
allow_delegation=True, # Optional
step_callback=my_intermediate_step_callback, # Optional
memory=True # Optional
)
```

View File

@@ -1,6 +1,6 @@
---
title: How Agents Collaborate in CrewAI
description: Exploring the dynamics of agent collaboration within the CrewAI framework.
description: Exploring the dynamics of agent collaboration within the CrewAI framework, focusing on the newly integrated features for enhanced functionality.
---
## Collaboration Fundamentals
@@ -11,14 +11,27 @@ description: Exploring the dynamics of agent collaboration within the CrewAI fra
- **Task Assistance**: Allows agents to seek help from peers with the required expertise for specific tasks.
- **Resource Allocation**: Optimizes task execution through the efficient distribution and sharing of resources among agents.
## Enhanced Attributes for Improved Collaboration
The `Crew` class has been enriched with several attributes to support advanced functionalities:
- **Language Model Management (`manager_llm`, `function_calling_llm`)**: Manages language models for executing tasks and tools, facilitating sophisticated agent-tool interactions.
- **Process Flow (`process`)**: Defines the execution logic (e.g., sequential, hierarchical) to streamline task distribution and execution.
- **Verbose Logging (`verbose`)**: Offers detailed logging capabilities for monitoring and debugging purposes.
- **Configuration (`config`)**: Allows extensive customization to tailor the crew's behavior according to specific requirements.
- **Rate Limiting (`max_rpm`)**: Ensures efficient utilization of resources by limiting requests per minute.
- **Internationalization Support (`language`)**: Facilitates operation in multiple languages, enhancing global usability.
- **Execution and Output Handling (`full_output`)**: Distinguishes between full and final outputs for nuanced control over task results.
- **Callback and Telemetry (`step_callback`)**: Integrates callbacks for step-wise execution monitoring and telemetry for performance analytics.
- **Crew Sharing (`share_crew`)**: Enables sharing of crew information with CrewAI for continuous improvement.
## Delegation: Dividing to Conquer
Delegation enhances functionality by allowing agents to intelligently assign tasks or seek help, thereby amplifying the crew's overall capability.
## Implementing Collaboration and Delegation
Setting up a crew involves defining the roles and capabilities of each agent. CrewAI seamlessly manages their interactions, ensuring efficient collaboration and delegation.
Setting up a crew involves defining the roles and capabilities of each agent. CrewAI seamlessly manages their interactions, ensuring efficient collaboration and delegation, with enhanced customization and monitoring features to adapt to various operational needs.
## Example Scenario
Imagine a crew with a researcher agent tasked with data gathering and a writer agent responsible for compiling reports. The writer can delegate research tasks or ask questions to the researcher, facilitating a seamless workflow.
Consider a crew with a researcher agent tasked with data gathering and a writer agent responsible for compiling reports. The integration of advanced language model management and process flow attributes allows for more sophisticated interactions, such as the writer delegating complex research tasks to the researcher or querying specific information, thereby facilitating a seamless workflow.
## Conclusion
Collaboration and delegation are pivotal, transforming individual AI agents into a coherent, intelligent crew capable of tackling complex tasks. CrewAI's framework not only simplifies these interactions but enhances their effectiveness, paving the way for sophisticated AI-driven solutions.
The integration of advanced attributes and functionalities into the CrewAI framework significantly enriches the agent collaboration ecosystem. These enhancements not only simplify interactions but also offer unprecedented flexibility and control, paving the way for sophisticated AI-driven solutions capable of tackling complex tasks through intelligent collaboration and delegation.

View File

@@ -15,18 +15,17 @@ description: Understanding and utilizing crews in the crewAI framework.
| **Agents** | A list of agents that are part of the crew. |
| **Process** | The process flow (e.g., sequential, hierarchical) the crew follows. |
| **Verbose** | The verbosity level for logging during execution. |
| **Manager LLM** | The language model used by the manager agent in a hierarchical process. |
| **Function Calling LLM** | The language model used by all agensts in the crew to call functions, if none is passed the same main llm for each agent will be used. |
| **Config** | Configuration settings for the crew. |
| **Manager LLM** | The language model used by the manager agent in a hierarchical process. **Required when using a hierarchical process.** |
| **Function Calling LLM** | The language model used by all agents in the crew for calling functions. If none is passed, the main LLM for each agent will be used. |
| **Config** | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
| **Max RPM** | Maximum requests per minute the crew adheres to during execution. |
| **Language** | Language setting for the crew's operation. |
| **Full Output** | Whether the crew should return the full output with all tasks outputs or just the final output. |
| **Step Callback** | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations, it won't override the agent specific `step_callback` |
| **Share Crew** | Whether you want to share the complete crew infromation and execution with the crewAI team to make the library better, and allow us to train models. |
| **Language** | Language used for the crew, defaults to English. |
| **Full Output** | Whether the crew should return the full output with all tasks outputs or just the final output. |
| **Step Callback** | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
| **Share Crew** | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
!!! note "Crew Max RPM"
The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents `max_rpm` settings if you set it.
The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it.
## Creating a Crew
@@ -68,7 +67,7 @@ my_crew = Crew(
## Crew Execution Process
- **Sequential Process**: Tasks are executed one after another, allowing for a linear flow of work.
- **Hierarchical Process**: A manager agent coordinates the crew, delegating tasks and validating outcomes before proceeding.
- **Hierarchical Process**: A manager agent coordinates the crew, delegating tasks and validating outcomes before proceeding. **Note**: A `manager_llm` is required for this process.
### Kicking Off a Crew

View File

@@ -1,23 +1,23 @@
---
title: Managing Processes in CrewAI
description: An overview of workflow management through processes in CrewAI.
description: Detailed guide on workflow management through processes in CrewAI, with updated implementation details.
---
## Understanding Processes
!!! note "Core Concept"
Processes in CrewAI orchestrate how tasks are executed by agents, akin to project management in human teams. They ensure tasks are distributed and completed efficiently, according to a predefined game plan.
In CrewAI, processes orchestrate the execution of tasks by agents, akin to project management in human teams. These processes ensure tasks are distributed and executed efficiently, in alignment with a predefined strategy.
## Process Implementations
- **Sequential**: Executes tasks one after another, ensuring a linear and orderly progression.
- **Hierarchical**: Implements a chain of command, where tasks are delegated and executed based on a managerial structure.
- **Consensual (WIP)**: Future process type aiming for collaborative decision-making among agents on task execution.
- **Sequential**: Executes tasks sequentially, ensuring tasks are completed in an orderly progression.
- **Hierarchical**: Organizes tasks in a managerial hierarchy, where tasks are delegated and executed based on a structured chain of command; the manager for delegation is automatically created by crewAI.
- **Consensual (Planned)**: A future process type aiming for collaborative decision-making among agents on task execution, introducing a more democratic approach to task management within CrewAI.
## The Role of Processes in Teamwork
Processes transform individual agents into a unified team, coordinating their efforts to achieve common goals with efficiency and harmony.
Processes enable individual agents to operate as a cohesive unit, streamlining their efforts to achieve common objectives with efficiency and coherence.
## Assigning Processes to a Crew
Specify the process during crew creation to determine the execution strategy:
Specify the process type upon crew creation to set the execution strategy:
```python
from crewai import Crew
@@ -29,20 +29,21 @@ crew = Crew(agents=my_agents, tasks=my_tasks, process=Process.sequential)
# Example: Creating a crew with a hierarchical process
crew = Crew(agents=my_agents, tasks=my_tasks, process=Process.hierarchical)
```
**Note:** Ensure `my_agents` and `my_tasks` are defined prior to creating a `Crew` object.
## Sequential Process
Ensures a natural flow of work, mirroring human team dynamics by progressing through tasks thoughtfully and systematically.
This method mirrors dynamic team workflows, progressing through tasks in a thoughtful and systematic manner. Task execution follows the predefined order in the task list, with the output of one task serving as context for the next.
Tasks need to be pre-assigned to agents, and the order of execution is determined by the order of the tasks in the list.
Tasks are executed one after another, ensuring a linear and orderly progression and the output of one task is automatically used as context into the next task.
You can also define specific task's outputs that should be used as context for another task by using the `context` parameter in the `Task` class.
To customize task context, utilize the `context` parameter in the `Task` class to specify outputs that should be used as context for subsequent tasks.
## Hierarchical Process
Mimics a corporate hierarchy, where a manager oversees task execution, planning, delegation, and validation, enhancing task coordination.
Emulates a corporate hierarchy. A "manager" agent is automatically created so it oversees task execution, including planning, delegation, and validation. Tasks are not pre-assigned; the manager allocates tasks to agents, reviews outputs, and assesses task completion.
In this process tasks don't need to be pre-assigned to agents, the manager will decide which agent will perform each task, review the output and decide if the task is completed or not.
## Process Class: Detailed Overview
The `Process` class is implemented as an enumeration (`Enum`), ensuring type safety and restricting process values to the defined types (`sequential` and `hierarchical`). This design choice guarantees that only valid processes are utilized within the CrewAI framework.
## Planned Future Processes
- **Consensual Process**: A collaborative decision-making process among agents on task execution is planned but not currently implemented. This future enhancement will introduce a more democratic approach to task management within CrewAI.
## Conclusion
Processes are vital for structured collaboration within CrewAI, enabling agents to work together systematically. Future updates will introduce new processes, further mimicking the adaptability and complexity of human teamwork.
The structured collaboration facilitated by processes within CrewAI is crucial for enabling systematic teamwork among agents. Documentation will be updated to reflect new processes and enhancements, ensuring users have access to the most current and comprehensive information.

View File

@@ -5,20 +5,20 @@ description: Overview and management of tasks within the crewAI framework.
## Overview of a Task
!!! note "What is a Task?"
In the CrewAI framework, tasks are individual assignments that agents complete. They encapsulate necessary information for execution, including a description, assigned agent, and required tools, offering flexibility for various action complexities.
In the CrewAI framework, tasks are individual assignments that agents complete. They encapsulate necessary information for execution, including a description, an assigned agent, and required tools, offering flexibility for various action complexities.
Tasks in CrewAI can be designed to require collaboration between agents. For example, one agent might gather data while another analyzes it. This collaborative approach can be defined within the task properties and managed by the Crew's process.
## Task Attributes
| Attribute | Description |
| :---------- | :----------------------------------- |
| :------------- | :----------------------------------- |
| **Description** | A clear, concise statement of what the task entails. |
| **Agent** | Optionally, you can specify which agent is responsible for the task. If not, the crew's process will determine who takes it on. |
| **Expected Output** *(optional)* | Clear and detailed definition of expected output for the task. |
| **Tools** *(optional)* | These are the functions or capabilities the agent can utilize to perform the task. They can be anything from simple actions like 'search' to more complex interactions with other agents or APIs. |
| **Async Execution** *(optional)* | If the task should be executed asynchronously. |
| **Context** *(optional)* | Other tasks that will have their output used as context for this task, if one is an asynchronous task it will wait for that to finish |
| **Async Execution** *(optional)* | If the task should be executed asynchronously. This indicates that the crew will not wait for the task to be completed to continue with the next task. |
| **Context** *(optional)* | Other tasks that will have their output used as context for this task. If a task is asynchronous, the system will wait for that to finish before using its output as context. |
| **Output JSON** *(optional)* | Takes a pydantic model and returns the output as a JSON object. **Agent LLM needs to be using OpenAI client, could be Ollama for example but using the OpenAI wrapper** |
| **Output Pydantic** *(optional)* | Takes a pydantic model and returns the output as a pydantic object. **Agent LLM needs to be using OpenAI client, could be Ollama for example but using the OpenAI wrapper** |
| **Output File** *(optional)* | Takes a file path and saves the output of the task on it. |
@@ -26,18 +26,18 @@ Tasks in CrewAI can be designed to require collaboration between agents. For exa
## Creating a Task
This is the simpliest example for creating a task, it involves defining its scope and agent, but there are optional attributes that can provide a lot of flexibility:
This is the simplest example for creating a task, it involves defining its scope and agent, but there are optional attributes that can provide a lot of flexibility:
```python
from crewai import Task
task = Task(
description='Find and summarize the latest and most relevant news on AI',
agent=sales_agent
description='Find and summarize the latest and most relevant news on AI',
agent=sales_agent
)
```
!!! note "Task Assignment"
Tasks can be assigned directly by specifying an `agent` to them, or they can be assigned in run time if you are using the `hierarchical` through CrewAI's process, considering roles, availability, or other criteria.
Tasks can be assigned directly by specifying an `agent` to them, or they can be assigned in run time if you are using the `hierarchical` through CrewAI's process, considering roles, availability, or other criteria.
## Integrating Tools with Tasks
@@ -54,12 +54,12 @@ from langchain.agents import Tool
from langchain_community.tools import DuckDuckGoSearchRun
research_agent = Agent(
role='Researcher',
goal='Find and summarize the latest AI news',
backstory="""You're a researcher at a large company.
You're responsible for analyzing data and providing insights
to the business."""
verbose=True
role='Researcher',
goal='Find and summarize the latest AI news',
backstory="""You're a researcher at a large company.
You're responsible for analyzing data and providing insights
to the business."""
verbose=True
)
# Install duckduckgo-search for this example:
@@ -68,15 +68,15 @@ search_tool = DuckDuckGoSearchRun()
task = Task(
description='Find and summarize the latest AI news',
expected_output='A bullet list summary of the top 5 most important AI news',
agent=research_agent,
expected_output='A bullet list summary of the top 5 most important AI news',
agent=research_agent,
tools=[search_tool]
)
crew = Crew(
agents=[research_agent],
tasks=[task],
verbose=2
agents=[research_agent],
tasks=[task],
verbose=2
)
result = crew.kickoff()
@@ -85,9 +85,9 @@ print(result)
This demonstrates how tasks with specific tools can override an agent's default set for tailored task execution.
## Refering other Tasks
## Referring to Other Tasks
In crewAI the output of one task is automatically relayed into the next one, but you can specifically define what tasks output should be used as context for another task.
In crewAI, the output of one task is automatically relayed into the next one, but you can specifically define what tasks' output should be used as context for another task.
This is useful when you have a task that depends on the output of another task that is not performed immediately after it. This is done through the `context` attribute of the task:
@@ -96,15 +96,15 @@ This is useful when you have a task that depends on the output of another task t
research_task = Task(
description='Find and summarize the latest AI news',
expected_output='A bullet list summary of the top 5 most important AI news',
agent=research_agent,
expected_output='A bullet list summary of the top 5 most important AI news',
agent=research_agent,
tools=[search_tool]
)
write_blog_task = Task(
description="Write a full blog post about the importante of AI and it's latest news",
expected_output='Full blog post that is 4 paragraphs long',
agent=writer_agent,
description="Write a full blog post about the importance of AI and its latest news",
expected_output='Full blog post that is 4 paragraphs long',
agent=writer_agent,
context=[research_task]
)
@@ -113,7 +113,7 @@ write_blog_task = Task(
## Asynchronous Execution
You can define a task to be executed asynchronously, this means that the crew will not wait for it to be completed to continue with the next task. This is useful for tasks that take a long time to be completed, or that are not crucial for the next tasks to be performed.
You can define a task to be executed asynchronously. This means that the crew will not wait for it to be completed to continue with the next task. This is useful for tasks that take a long time to be completed, or that are not crucial for the next tasks to be performed.
You can then use the `context` attribute to define in a future task that it should wait for the output of the asynchronous task to be completed.
@@ -121,7 +121,7 @@ You can then use the `context` attribute to define in a future task that it shou
#...
list_ideas = Task(
description="List of 5 interesting ideas to explore for na article about AI.",
description="List of 5 interesting ideas to explore for an article about AI.",
expected_output="Bullet point list of 5 ideas for an article.",
agent=researcher,
async_execution=True # Will be executed asynchronously
@@ -135,7 +135,7 @@ list_important_history = Task(
)
write_article = Task(
description="Write an article about AI, it's history and interesting ideas.",
description="Write an article about AI, its history, and interesting ideas.",
expected_output="A 4 paragraph article about AI.",
agent=writer,
context=[list_ideas, list_important_history] # Will wait for the output of the two tasks to be completed
@@ -157,7 +157,7 @@ def callback_function(output: TaskOutput):
print(f"""
Task completed!
Task: {output.description}
Output: {output.raw_ouput}
Output: {output.raw_output}
""")
research_task = Task(
@@ -171,7 +171,7 @@ research_task = Task(
#...
```
## Accessing a specific Task Output
## Accessing a Specific Task Output
Once a crew finishes running, you can access the output of a specific task by using the `output` attribute of the task object:
@@ -198,18 +198,24 @@ result = crew.kickoff()
print(f"""
Task completed!
Task: {task1.output.description}
Output: {task1.output.raw_ouput}
Output: {task1.output.raw_output}
""")
```
## Tool Override Mechanism
Specifying tools in a task allows for dynamic adaptation of agent capabilities, emphasizing CrewAI's flexibility.
## Error Handling and Validation Mechanisms
While creating and executing tasks, certain validation mechanisms are in place to ensure the robustness and reliability of task attributes. These include but are not limited to:
- Ensuring only one output type is set per task to maintain clear output expectations.
- Preventing the manual assignment of the `id` attribute to uphold the integrity of the unique identifier system.
These validations help in maintaining the consistency and reliability of task executions within the crewAI framework.
## Conclusion
Tasks are the driving force behind the actions of agents in crewAI. By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit.
Equipping tasks with appropriate tools is crucial for maximizing CrewAI's potential, ensuring agents are effectively prepared for their assignments.
Equipping tasks with appropriate tools and following robust validation practices is crucial for maximizing CrewAI's potential, ensuring agents are effectively prepared for their assignments and that tasks are executed as intended.

View File

@@ -1,65 +1,212 @@
---
title: crewAI Tools
description: Understanding and leveraging tools within the crewAI framework.
description: Understanding and leveraging tools within the crewAI framework for agent collaboration and task execution.
---
## Introduction
CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers. This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.
## What is a Tool?
!!! note "Definition"
A tool in CrewAI, is a skill, something Agents can use perform tasks, right now those can be tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools), those are basically functions that an agent can utilize for various actions, from simple searches to complex interactions with external systems.
A tool in CrewAI is a skill or function that agents can utilize to perform various actions. This includes tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools), enabling everything from simple searches to complex interactions and effective teamwork among agents.
## Key Characteristics of Tools
- **Utility**: Designed for specific tasks such as web searching, data analysis, or content generation.
- **Integration**: Enhance agent capabilities by integrating tools directly into their workflow.
- **Customizability**: Offers the flexibility to develop custom tools or use existing ones from LangChain's ecosystem.
- **Utility**: Designed for various tasks such as web searching, data analysis, content generation, and agent collaboration.
- **Integration**: Enhances agent capabilities by integrating tools directly into their workflow.
- **Customizability**: Offers flexibility to develop custom tools or use existing ones, catering to specific agent needs.
## Using crewAI Tools
crewAI comes with a series of built-in tools that can be used to extend the capabilities of your agents. Start by installing our extra tools package:
```bash
pip install 'crewai[tools]'
```
Here is an example of how to use them:
```python
import os
from crewai import Agent, Task, Crew
# Importing some of the crewAI tools
from crewai_tools import (
DirectoryReadTool,
FileReadTool,
SeperDevTool,
WebsiteSearchTool
)
# get a free account in serper.dev
os.environ["SERPER_API_KEY"] = "Your Key"
os.environ["OPENAI_API_KEY"] = "Your Key"
# Instantiate tools
# Assumes the ./blog-posts directory exists and contains existing blog posts
docs_tools = DirectoryReadTool(directory='./blog-posts')
file_read_tool = FileReadTool()
search_tool = SeperDevTool()
website_rag = WebsiteSearchTool()
# Create agents
researcher = Agent(
role='Market Research Analyst',
goal='Provide up-to-date market analysis of the AI industry',
backstory='An expert analyst with a keen eye for market trends.',
tools=[search_tool, website_rag],
verbose=True
)
writer = Agent(
role='Content Writer',
goal='Write amazing, super engaging blog post about the AI industry',
backstory='A skilled writer with a passion for technology.',
tools=[docs_tools, file_read_tool],
verbose=True
)
# Create tasks
research = Task(
description='Research the AI industry and provide a summary of the latest most trending matters and developments.',
expected_output='A summary of the top 3 latest most trending matters and developments in the AI industry with your unique take on why they matter.',
agent=researcher
)
write = Task(
description='Write an engaging blog post about the AI industry, using the summary provided by the research analyst. Read the latest blog posts in the directory to get inspiration.',
expected_output='A 4 paragraph blog post formatted as markdown with proper subtitles about the latest trends that is engaging and informative and funny, avoid complex words and make it easy to read.',
agent=writer,
output_file='blog-posts/new_post.md' # The final blog post will be written here
)
# Create a crew
crew = Crew(
agents=[researcher, writer],
tasks=[research, write],
verbose=2
)
# Execute the tasks
crew.kickoff()
```
## Available crewAI Tools
Most of the tools in the crewAI toolkit offer the ability to set specific arguments or to be left more open-ended; this is the case for most of the tools, for example:
```python
from crewai_tools import DirectoryReadTool
# This will allow the agent with this tool to read any directory it wants during its execution
tool = DirectoryReadTool()
# OR
# This will allow the agent with this tool to read only the directory specified during its execution
tool = DirectoryReadTool(directory='./directory')
```
Specific per tool docs are coming soon.
Here is a list of the available tools and their descriptions:
| Tool | Description |
| :-------------------------- | :-------------------------------------------------------------------------------------------- |
| **CodeDocsSearchTool** | A RAG tool optimized for searching through code documentation and related technical documents.|
| **CSVSearchTool** | A RAG tool designed for searching within CSV files, tailored to handle structured data. |
| **DirectorySearchTool** | A RAG tool for searching within directories, useful for navigating through file systems. |
| **DOCXSearchTool** | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files. |
| **DirectoryReadTool** | Facilitates reading and processing of directory structures and their contents. |
| **FileReadTool** | Enables reading and extracting data from files, supporting various file formats. |
| **GithubSearchTool** | A RAG tool for searching within GitHub repositories, useful for code and documentation search.|
| **SeperDevTool** | A specialized tool for development purposes, with specific functionalities under development. |
| **TXTSearchTool** | A RAG tool focused on searching within text (.txt) files, suitable for unstructured data. |
| **JSONSearchTool** | A RAG tool designed for searching within JSON files, catering to structured data handling. |
| **MDXSearchTool** | A RAG tool tailored for searching within Markdown (MDX) files, useful for documentation. |
| **PDFSearchTool** | A RAG tool aimed at searching within PDF documents, ideal for processing scanned documents. |
| **PGSearchTool** | A RAG tool optimized for searching within PostgreSQL databases, suitable for database queries. |
| **RagTool** | A general-purpose RAG tool capable of handling various data sources and types. |
| **ScrapeElementFromWebsiteTool** | Enables scraping specific elements from websites, useful for targeted data extraction. |
| **ScrapeWebsiteTool** | Facilitates scraping entire websites, ideal for comprehensive data collection. |
| **WebsiteSearchTool** | A RAG tool for searching website content, optimized for web data extraction. |
| **XMLSearchTool** | A RAG tool designed for searching within XML files, suitable for structured data formats. |
| **YoutubeChannelSearchTool**| A RAG tool for searching within YouTube channels, useful for video content analysis. |
| **YoutubeVideoSearchTool** | A RAG tool aimed at searching within YouTube videos, ideal for video data extraction. |
## Creating your own Tools
!!! example "Custom Tool Creation"
Developers can craft custom tools tailored for their agents needs or utilize pre-built options. Heres how to create one:
Developers can craft custom tools tailored for their agents' needs or utilize pre-built options:
To create your own crewAI tools you will need to install our extra tools package:
```bash
pip install 'crewai[tools]'
```
Once you do that there are two main ways for one to create a crewAI tool:
### Subclassing `BaseTool`
```python
from crewai_tools import BaseTool
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."
def _run(self, argument: str) -> str:
# Implementation goes here
pass
```
Define a new class inheriting from `BaseTool`, specifying `name`, `description`, and the `_run` method for operational logic.
### Utilizing the `tool` Decorator
For a simpler approach, create a `Tool` object directly with the required attributes and a functional logic.
```python
from crewai_tools import tool
@tool("Name of my tool")
def my_tool(question: str) -> str:
"""Clear description for what this tool is useful for, you agent will need this information to use it."""
# Function logic here
```
```python
import json
import requests
from crewai import Agent
from langchain.tools import tool
from crewai.tools import tool
from unstructured.partition.html import partition_html
class BrowserTools():
# Anotate the fuction with the tool decorator from LangChain
@tool("Scrape website content")
def scrape_website(website):
# Write logic for the tool.
# In this case a function to scrape website content
url = f"https://chrome.browserless.io/content?token={config('BROWSERLESS_API_KEY')}"
payload = json.dumps({"url": website})
headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
response = requests.request("POST", url, headers=headers, data=payload)
elements = partition_html(text=response.text)
content = "\n\n".join([str(el) for el in elements])
return content[:5000]
# Annotate the function with the tool decorator from crewAI
@tool("Integration with a given API")
def integration_tool(argument: str) -> str:
"""Integration with a given API"""
# Code here
return results # string to be sent back to the agent
# Assign the scraping tool to an agent
agent = Agent(
role='Research Analyst',
goal='Provide up-to-date market analysis',
backstory='An expert analyst with a keen eye for market trends.',
tools=[BrowserTools().scrape_website]
role='Research Analyst',
goal='Provide up-to-date market analysis',
backstory='An expert analyst with a keen eye for market trends.',
tools=[integration_tool]
)
```
## Using LangChain Tools
!!! info "LangChain Integration"
CrewAI seamlessly integrates with LangChains comprehensive toolkit. Assigning an existing tool to an agent is straightforward:
CrewAI seamlessly integrates with LangChain's comprehensive toolkit for search-based queries and more:
```python
from crewai import Agent
from langchain.agents import Tool
from langchain.utilities import GoogleSerperAPIWrapper
import os
# Setup API keys
os.environ["OPENAI_API_KEY"] = "Your Key"
os.environ["SERPER_API_KEY"] = "Your Key"
search = GoogleSerperAPIWrapper()
@@ -77,6 +224,8 @@ agent = Agent(
backstory='An expert analyst with a keen eye for market trends.',
tools=[serper_tool]
)
# rest of the code ...
```
## Conclusion

View File

@@ -1,112 +1,113 @@
---
title: Assembling and Activating Your CrewAI Team
description: A step-by-step guide to creating a cohesive CrewAI team for your projects.
description: A comprehensive guide to creating a dynamic CrewAI team for your projects, with updated functionalities including verbose mode, memory capabilities, and more.
---
## Introduction
Embarking on your CrewAI journey involves a few straightforward steps to set up your environment and initiate your AI crew. This guide ensures a seamless start.
Embark on your CrewAI journey by setting up your environment and initiating your AI crew with enhanced features. This guide ensures a seamless start, incorporating the latest updates.
## Step 0: Installation
Begin by installing CrewAI and any additional packages required for your project. For instance, the `duckduckgo-search` package is used in this example for enhanced search capabilities.
Install CrewAI and any necessary packages for your project. The `duckduckgo-search` package is highlighted here for enhanced search capabilities.
```shell
pip install crewai
pip install crewai[tools]
pip install duckduckgo-search
```
## Step 1: Assemble Your Agents
Begin by defining your agents with distinct roles and backstories. These elements not only add depth but also guide their task execution and interaction within the crew.
Define your agents with distinct roles, backstories, and now, enhanced capabilities such as verbose mode and memory usage. These elements add depth and guide their task execution and interaction within the crew.
```python
import os
os.environ["OPENAI_API_KEY"] = "Your Key"
from crewai import Agent
from langchain_community.tools import DuckDuckGoSearchRun
search_tool = DuckDuckGoSearchRun()
# Topic that will be used in the crew run
# Topic for the crew run
topic = 'AI in healthcare'
# Creating a senior researcher agent
# Creating a senior researcher agent with memory and verbose mode
researcher = Agent(
role='Senior Researcher',
goal=f'Uncover groundbreaking technologies around {topic}',
goal=f'Uncover groundbreaking technologies in {topic}',
verbose=True,
memory=True,
backstory="""Driven by curiosity, you're at the forefront of
innovation, eager to explore and share knowledge that could change
the world."""
the world.""",
tools=[search_tool],
allow_delegation=True
)
# Creating a writer agent
# Creating a writer agent with custom tools and delegation capability
writer = Agent(
role='Writer',
goal=f'Narrate compelling tech stories around {topic}',
goal=f'Narrate compelling tech stories about {topic}',
verbose=True,
memory=True,
backstory="""With a flair for simplifying complex topics, you craft
engaging narratives that captivate and educate, bringing new
discoveries to light in an accessible manner."""
discoveries to light in an accessible manner.""",
tools=[search_tool],
allow_delegation=False
)
```
## Step 2: Define the Tasks
Detail the specific objectives for your agents. These tasks guide their focus and ensure a targeted approach to their roles.
Detail the specific objectives for your agents, including new features for asynchronous execution and output customization. These tasks ensure a targeted approach to their roles.
```python
from crewai import Task
# Install duckduckgo-search for this example:
# !pip install -U duckduckgo-search
from langchain_community.tools import DuckDuckGoSearchRun
search_tool = DuckDuckGoSearchRun()
# Research task for identifying AI trends
# Research task
research_task = Task(
description=f"""Identify the next big trend in {topic}.
Focus on identifying pros and cons and the overall narrative.
Your final report should clearly articulate the key points,
its market opportunities, and potential risks.
""",
its market opportunities, and potential risks.""",
expected_output='A comprehensive 3 paragraphs long report on the latest AI trends.',
max_inter=3,
tools=[search_tool],
agent=researcher
agent=researcher,
)
# Writing task based on research findings
# Writing task with language model configuration
write_task = Task(
description=f"""Compose an insightful article on {topic}.
Focus on the latest trends and how it's impacting the industry.
This article should be easy to understand, engaging and positive.
""",
expected_output=f'A 4 paragraph article on {topic} advancements.',
This article should be easy to understand, engaging, and positive.""",
expected_output=f'A 4 paragraph article on {topic} advancements formatted as markdown.',
tools=[search_tool],
agent=writer
agent=writer,
async_execution=False,
output_file='new-blog-post.md' # Example of output customization
)
```
## Step 3: Form the Crew
Combine your agents into a crew, setting the workflow process they'll follow to accomplish the tasks.
Combine your agents into a crew, setting the workflow process they'll follow to accomplish the tasks, now with the option to configure language models for enhanced interaction.
```python
from crewai import Crew, Process
# Forming the tech-focused crew
# Forming the tech-focused crew with enhanced configurations
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, write_task],
process=Process.sequential # Sequential task execution
process=Process.sequential # Optional: Sequential task execution is default
)
```
## Step 4: Kick It Off
With your crew ready and the stage set, initiate the process. Watch as your agents collaborate, each contributing their expertise to achieve the collective goal.
Initiate the process with your enhanced crew ready. Observe as your agents collaborate, leveraging their new capabilities for a successful project outcome.
```python
# Starting the task execution process
# Starting the task execution process with enhanced feedback
result = crew.kickoff()
print(result)
```
## Conclusion
Building and activating a crew in CrewAI is a seamless process. By carefully assigning roles, tasks, and a clear process, your AI team is equipped to tackle challenges efficiently. The depth of agent backstories and the precision of their objectives enrich the collaboration, leading to successful project outcomes.
Building and activating a crew in CrewAI has evolved with new functionalities. By incorporating verbose mode, memory capabilities, asynchronous task execution, output customization, and language model configuration, your AI team is more equipped than ever to tackle challenges efficiently. The depth of agent backstories and the precision of their objectives enrich collaboration, leading to successful project outcomes.

View File

@@ -1,55 +1,72 @@
---
title: Customizing Agents in CrewAI
description: A guide to tailoring agents for specific roles and tasks within the CrewAI framework.
description: A comprehensive guide to tailoring agents for specific roles, tasks, and advanced customizations within the CrewAI framework.
---
## Customizable Attributes
Tailoring your AI agents is pivotal in crafting an efficient CrewAI team. Customization allows agents to be dynamically adapted to the unique requirements of any project.
Crafting an efficient CrewAI team hinges on the ability to tailor your AI agents dynamically to meet the unique requirements of any project. This section covers the foundational attributes you can customize.
### Key Attributes for Customization
- **Role**: Defines the agent's job within the crew, such as 'Analyst' or 'Customer Service Rep'.
- **Goal**: The agent's objective, aligned with its role and the crew's overall goals.
- **Backstory**: Adds depth to the agent's character, enhancing its role and motivations within the crew.
- **Tools**: The capabilities or methods the agent employs to accomplish tasks, ranging from simple functions to complex integrations.
- **Role**: Specifies the agent's job within the crew, such as 'Analyst' or 'Customer Service Rep'.
- **Goal**: Defines what the agent aims to achieve, in alignment with its role and the overarching objectives of the crew.
- **Backstory**: Provides depth to the agent's persona, enriching its motivations and engagements within the crew.
- **Tools**: Represents the capabilities or methods the agent uses to perform tasks, from simple functions to intricate integrations.
## Understanding Tools in CrewAI
Tools empower agents with functionalities to interact and manipulate their environment, from generic utilities to specialized functions. Integrating with LangChain offers access to a broad range of tools for diverse tasks.
## Advanced Customization Options
Beyond the basic attributes, CrewAI allows for deeper customization to enhance an agent's behavior and capabilities significantly.
### Language Model Customization
Agents can be customized with specific language models (`llm`) and function-calling language models (`function_calling_llm`), offering advanced control over their processing and decision-making abilities.
### Enabling Memory for Agents
CrewAI supports memory for agents, enabling them to remember past interactions. This feature is critical for tasks requiring awareness of previous contexts or decisions.
## Performance and Debugging Settings
Adjusting an agent's performance and monitoring its operations are crucial for efficient task execution.
### Verbose Mode and RPM Limit
- **Verbose Mode**: Enables detailed logging of an agent's actions, useful for debugging and optimization.
- **RPM Limit**: Sets the maximum number of requests per minute (`max_rpm`), controlling the agent's query frequency to external services.
### Maximum Iterations for Task Execution
The `max_iter` attribute allows users to define the maximum number of iterations an agent can perform for a single task, preventing infinite loops or excessively long executions.
## Customizing Agents and Tools
Agents are customized by defining their attributes during initialization, with tools being a critical aspect of their functionality.
Agents are customized by defining their attributes and tools during initialization. Tools are critical for an agent's functionality, enabling them to perform specialized tasks. In this example we will use the crewAI tools package to create a tool for a research analyst agent.
```shell
pip install 'crewai[tools]'
```
### Example: Assigning Tools to an Agent
```python
from crewai import Agent
from langchain.agents import Tool
from langchain.utilities import GoogleSerperAPIWrapper
import os
from crewai import Agent
from crewai_tools import SeperDevTool
# Set API keys for tool initialization
os.environ["OPENAI_API_KEY"] = "Your Key"
os.environ["SERPER_API_KEY"] = "Your Key"
# Initialize a search tool
search_tool = GoogleSerperAPIWrapper()
search_tool = SeperDevTool()
# Define and assign the tool to an agent
serper_tool = Tool(
name="Intermediate Answer",
func=search_tool.run,
description="Useful for search-based queries"
)
# Initialize the agent with the tool
# Initialize the agent with advanced options
agent = Agent(
role='Research Analyst',
goal='Provide up-to-date market analysis',
backstory='An expert analyst with a keen eye for market trends.',
tools=[serper_tool]
tools=[serper_tool],
memory=True,
verbose=True,
max_rpm=10, # Optional: Limit requests to 10 per minute, preventing API abuse
max_iter=5, # Optional: Limit task iterations to 5 before the agent tries to give its best answer
allow_delegation=False
)
```
## Delegation and Autonomy
Agents in CrewAI can delegate tasks or ask questions, enhancing the crew's collaborative dynamics. This feature can be disabled to ensure straightforward task execution.
Controlling an agent's ability to delegate tasks or ask questions is vital for tailoring its autonomy and collaborative dynamics within the CrewAI framework.
### Example: Disabling Delegation for an Agent
```python
@@ -62,4 +79,4 @@ agent = Agent(
```
## Conclusion
Customizing agents is key to leveraging the full potential of CrewAI. By thoughtfully setting agents' roles, goals, backstories, and tools, you craft a nuanced and capable AI team ready to tackle complex challenges.
Customizing agents in CrewAI by setting their roles, goals, backstories, and tools, alongside advanced options like language model customization, memory, and performance settings, equips a nuanced and capable AI team ready for complex challenges.

View File

@@ -1,36 +1,36 @@
---
title: Implementing the Hierarchical Process in CrewAI
description: Understanding and applying the hierarchical process within your CrewAI projects.
description: Understanding and applying the hierarchical process within your CrewAI projects, with updates reflecting the latest coding practices.
---
## Introduction
The hierarchical process in CrewAI introduces a structured approach to task management, mimicking traditional organizational hierarchies for efficient task delegation and execution.
The hierarchical process in CrewAI introduces a structured approach to managing tasks, mimicking traditional organizational hierarchies for efficient task delegation and execution. This ensures a systematic workflow that enhances project outcomes.
!!! note "Complexity"
The current implementation of the hierarchical process relies on tools usage that usually require more complex models like GPT-4 and usually imply of a higher token usage.
!!! note "Complexity and Efficiency"
The hierarchical process is designed to leverage advanced models like GPT-4, optimizing token usage while handling complex tasks with greater efficiency.
## Hierarchical Process Overview
In this process, tasks are assigned and executed based on a defined hierarchy, where a 'manager' agent coordinates the workflow, delegating tasks to other agents and validating their outcomes before proceeding.
Tasks within this process are managed through a clear hierarchy, where a 'manager' agent coordinates the workflow, delegates tasks, and validates outcomes, ensuring a streamlined and effective execution process.
### Key Features
- **Task Delegation**: A manager agent oversees task distribution among crew members.
- **Result Validation**: The manager reviews outcomes before passing tasks along, ensuring quality and relevance.
- **Efficient Workflow**: Mimics corporate structures for a familiar and organized task management approach.
- **Task Delegation**: A manager agent is responsible for allocating tasks among crew members based on their roles and capabilities.
- **Result Validation**: The manager evaluates the outcomes to ensure they meet the required standards before moving forward.
- **Efficient Workflow**: Emulates corporate structures, offering an organized and familiar approach to task management.
## Implementing the Hierarchical Process
To utilize the hierarchical process, you must define a crew with a designated manager and a clear chain of command for task execution.
To adopt the hierarchical process, define a crew with a designated manager and establish a clear chain of command for task execution. This structure is crucial for maintaining an orderly and efficient workflow.
!!! note "Tools on the hierarchical process"
For tools when using the hierarchical process, you want to make sure to assign them to the agents instead of the tasks, as the manager will be the one delegating the tasks and the agents will be the ones executing them.
!!! note "Tools and Agent Assignment"
Tools should be assigned at the agent level, not the task level, to facilitate task delegation and execution by the designated agents under the manager's guidance.
!!! note "Manager LLM"
A manager will be automatically set for the crew, you don't need to define it. You do need to set the `manager_llm` parameter in the crew though.
!!! note "Manager LLM Configuration"
A manager LLM is automatically assigned to the crew, eliminating the need for manual definition. However, configuring the `manager_llm` parameter is necessary to tailor the manager's decision-making process.
```python
from langchain_openai import ChatOpenAI
from crewai import Crew, Process, Agent
# Define your agents, no need to define a manager
# Agents are defined without specifying a manager explicitly
researcher = Agent(
role='Researcher',
goal='Conduct in-depth analysis',
@@ -42,19 +42,19 @@ writer = Agent(
# tools = [...]
)
# Form the crew with a hierarchical process
# Establishing the crew with a hierarchical process
project_crew = Crew(
tasks=[...], # Tasks that that manager will figure out how to complete
tasks=[...], # Tasks to be delegated and executed under the manager's supervision
agents=[researcher, writer],
manager_llm=ChatOpenAI(temperature=0, model="gpt-4"), # The manager's LLM that will be used internally
process=Process.hierarchical # Designating the hierarchical approach
manager_llm=ChatOpenAI(temperature=0, model="gpt-4"), # Defines the manager's decision-making engine
process=Process.hierarchical # Specifies the hierarchical management approach
)
```
### Workflow in Action
1. **Task Assignment**: The manager assigns tasks based on agent roles and capabilities.
2. **Execution and Review**: Agents perform their tasks, with the manager reviewing outcomes for approval.
3. **Sequential Task Progression**: Tasks are completed in a sequence dictated by the manager, ensuring orderly progression.
1. **Task Assignment**: The manager strategically assigns tasks, considering each agent's role and skills.
2. **Execution and Review**: Agents complete their tasks, followed by a thorough review by the manager to ensure quality standards.
3. **Sequential Task Progression**: The manager ensures tasks are completed in a logical order, facilitating smooth project progression.
## Conclusion
The hierarchical process in CrewAI offers a familiar, structured way to manage tasks within a project. By leveraging a chain of command, it enhances efficiency and quality control, making it ideal for complex projects requiring meticulous oversight.
Adopting the hierarchical process in CrewAI facilitates a well-organized and efficient approach to project management. By structuring tasks and delegations within a clear hierarchy, it enhances both productivity and quality control, making it an ideal strategy for managing complex projects.

View File

@@ -1,10 +1,12 @@
# Human Input on Execution
# Human Input in Agent Execution
Human input is important in many agent execution use cases; humans can be prompted to step in and provide extra details when necessary.
Using it with crewAI is pretty straightforward and you can do it through a LangChain Tool.
Check [LangChain Integration](https://python.langchain.com/docs/integrations/tools/human_tools) for more details:
Human input is crucial in numerous agent execution scenarios, enabling agents to request additional information or clarification when necessary. This feature is particularly useful in complex decision-making processes or when agents require further details to complete a task effectively.
Example:
## Using Human Input with CrewAI
Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup.
### Example:
```python
import os
@@ -20,15 +22,14 @@ human_tools = load_tools(["human"])
# Define your agents with roles and goals
researcher = Agent(
role='Senior Research Analyst',
goal='Uncover cutting-edge developments in AI and data science in',
goal='Uncover cutting-edge developments in AI and data science',
backstory="""You are a Senior Research Analyst at a leading tech think tank.
Your expertise lies in identifying emerging trends and technologies in AI and
data science. You have a knack for dissecting complex data and presenting
actionable insights.""",
verbose=True,
allow_delegation=False,
# Passing human tools to the agent
tools=[search_tool]+human_tools
tools=[search_tool]+human_tools # Passing human tools to the agent
)
writer = Agent(
role='Tech Content Strategist',
@@ -41,13 +42,12 @@ writer = Agent(
)
# Create tasks for your agents
# Being explicit on the task to ask for human feedback.
task1 = Task(
description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
Identify key trends, breakthrough technologies, and potential industry impacts.
Compile your findings in a detailed report.
Make sure to check with the human if the draft is good before returning your Final Answer.
Your final answer MUST be a full analysis report""",
Make sure to check with a human if the draft is good before finalizing your answer.""",
expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
agent=researcher
)
@@ -58,6 +58,7 @@ task2 = Task(
Aim for a narrative that captures the essence of these breakthroughs and their
implications for the future.
Your final answer MUST be the full blog post of at least 3 paragraphs.""",
expected_output='A compelling 3 paragraphs blog post formated as markdown about the latest AI advancements in 2024',
agent=writer
)

View File

@@ -5,81 +5,71 @@ description: Guide on integrating CrewAI with various Large Language Models (LLM
## Connect CrewAI to LLMs
!!! note "Default LLM"
By default, crewAI uses OpenAI's GPT-4 model for language processing. However, you can configure your agents to use a different model or API. This guide will show you how to connect your agents to different LLMs. You can change the specific gpt model by setting the `OPENAI_MODEL_NAME` environment variable.
By default, CrewAI uses OpenAI's GPT-4 model for language processing. However, you can configure your agents to use a different model or API. This guide will show you how to connect your agents to different LLMs through environment variables and direct instantiation.
CrewAI offers flexibility in connecting to various LLMs, including local models via [Ollama](https://ollama.ai) and different APIs like Azure. It's compatible with all [LangChain LLM](https://python.langchain.com/docs/integrations/llms/) components, enabling diverse integrations for tailored AI solutions.
## CrewAI Agent Overview
The `Agent` class in CrewAI is central to implementing AI solutions. Here's a brief overview:
## Ollama Integration
Ollama is preferred for local LLM integration, offering customization and privacy benefits. It requires installation and configuration, including model adjustments via a Modelfile to optimize performance.
### Setting Up Ollama
- **Installation**: Follow Ollama's guide for setup.
- **Configuration**: [Adjust your local model with a Modelfile](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md), considering adding `Result` as a stop word and playing with parameters like `top_p` and `temperature`.
### Integrating Ollama with CrewAI
Instantiate Ollama and pass it to your agents within CrewAI, enhancing them with the local model's capabilities.
- **Attributes**:
- `role`: Defines the agent's role within the solution.
- `goal`: Specifies the agent's objective.
- `backstory`: Provides a background story to the agent.
- `llm`: Indicates the Large Language Model the agent uses.
### Example Changing OpenAI's GPT model
```python
# Required
os.environ["OPENAI_API_BASE"]='http://localhost:11434/v1'
os.environ["OPENAI_MODEL_NAME"]='openhermes'
os.environ["OPENAI_API_KEY"]=''
os.environ["OPENAI_MODEL_NAME"]="gpt-4-0125-preview"
local_expert = Agent(
# Agent will automatically use the model defined in the environment variable
example_agent = Agent(
role='Local Expert',
goal='Provide insights about the city',
backstory="A knowledgeable local guide.",
tools=[SearchTools.search_internet, BrowserTools.scrape_and_summarize_website],
verbose=True
)
```
## OpenAI Compatible API Endpoints
You can use environment variables for easy switch between APIs and models, supporting diverse platforms like FastChat, LM Studio, and Mistral AI.
## Ollama Integration
Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, set the appropriate environment variables as shown below. Note: Detailed Ollama setup is beyond this document's scope, but general guidance is provided.
### Configuration Examples
### Ollama
### Setting Up Ollama
- **Environment Variables Configuration**: To integrate Ollama, set the following environment variables:
```sh
OPENAI_API_BASE='http://localhost:11434/v1'
OPENAI_MODEL_NAME='openhermes' # Depending on the model you have available
OPENAI_API_KEY=NA
OPENAI_MODEL_NAME='openhermes' # Adjust based on available model
OPENAI_API_KEY=''
```
### FastChat
## OpenAI Compatible API Endpoints
Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, and Mistral AI.
### Configuration Examples
#### FastChat
```sh
OPENAI_API_BASE="http://localhost:8001/v1"
OPENAI_MODEL_NAME='oh-2.5m7b-q51' # Depending on the model you have available
OPENAI_MODEL_NAME='oh-2.5m7b-q51'
OPENAI_API_KEY=NA
```
### LM Studio
#### LM Studio
```sh
OPENAI_API_BASE="http://localhost:8000/v1"
OPENAI_MODEL_NAME=NA
OPENAI_API_KEY=NA
```
### Mistral API
#### Mistral API
```sh
OPENAI_API_KEY=your-mistral-api-key
OPENAI_API_BASE=https://api.mistral.ai/v1
OPENAI_MODEL_NAME="mistral-small" # Check documentation for available models
OPENAI_MODEL_NAME="mistral-small"
```
### text-gen-web-ui
```sh
OPENAI_API_BASE=http://localhost:5000/v1
OPENAI_MODEL_NAME=NA
OPENAI_API_KEY=NA
```
### Azure Open AI
Azure's OpenAI API needs a distinct setup, utilizing the `langchain_openai` component for Azure-specific configurations.
Configuration settings:
### Azure Open AI Configuration
For Azure OpenAI API integration, set the following environment variables:
```sh
AZURE_OPENAI_VERSION="2022-12-01"
AZURE_OPENAI_DEPLOYMENT=""
@@ -87,22 +77,24 @@ AZURE_OPENAI_ENDPOINT=""
AZURE_OPENAI_KEY=""
```
### Example Agent with Azure LLM
```python
from dotenv import load_dotenv
from crewai import Agent
from langchain_openai import AzureChatOpenAI
load_dotenv()
default_llm = AzureChatOpenAI(
azure_llm = AzureChatOpenAI(
azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
api_key=os.environ.get("AZURE_OPENAI_KEY")
)
example_agent = Agent(
azure_agent = Agent(
role='Example Agent',
goal='Demonstrate custom LLM configuration',
backstory='A diligent explorer of GitHub docs.',
llm=default_llm
llm=azure_llm
)
```

View File

@@ -1,37 +1,47 @@
---
title: Implementing the Sequential Process in CrewAI
description: A guide to utilizing the sequential process for task execution in CrewAI projects.
title: Using the Sequential Processes in crewAI
description: A comprehensive guide to utilizing the sequential process for task execution in crewAI projects.
---
## Introduction
The sequential process in CrewAI ensures tasks are executed one after the other, following a linear progression. This approach is akin to a relay race, where each agent completes their task before passing the baton to the next.
CrewAI offers a flexible framework for executing tasks in a structured manner, supporting both sequential and hierarchical processes. This guide outlines how to effectively implement these processes to ensure efficient task execution and project completion.
## Sequential Process Overview
This process is straightforward and effective, particularly for projects where tasks must be completed in a specific order to achieve the desired outcome.
The sequential process ensures tasks are executed one after the other, following a linear progression. This approach is ideal for projects requiring tasks to be completed in a specific order.
### Key Features
- **Linear Task Flow**: Tasks are handled in a predetermined sequence, ensuring orderly progression.
- **Simplicity**: Ideal for projects with clearly defined, step-by-step tasks.
- **Easy Monitoring**: Task completion can be easily tracked, offering clear insights into project progress.
- **Linear Task Flow**: Ensures orderly progression by handling tasks in a predetermined sequence.
- **Simplicity**: Best suited for projects with clear, step-by-step tasks.
- **Easy Monitoring**: Facilitates easy tracking of task completion and project progress.
## Implementing the Sequential Process
To apply the sequential process, assemble your crew and define the tasks in the order they need to be executed.
!!! note "Task assignment"
In the sequential process you need to make sure all tasks are assigned to the agents, as the agents will be the ones executing them.
Assemble your crew and define tasks in the order they need to be executed.
```python
from crewai import Crew, Process, Agent, Task
# Define your agents
researcher = Agent(role='Researcher', goal='Conduct foundational research')
analyst = Agent(role='Data Analyst', goal='Analyze research findings')
writer = Agent(role='Writer', goal='Draft the final report')
researcher = Agent(
role='Researcher',
goal='Conduct foundational research',
backstory='An experienced researcher with a passion for uncovering insights'
)
analyst = Agent(
role='Data Analyst',
goal='Analyze research findings',
backstory='A meticulous analyst with a knack for uncovering patterns'
)
writer = Agent(
role='Writer',
goal='Draft the final report',
backstory='A skilled writer with a talent for crafting compelling narratives'
)
# Define the tasks in sequence
research_task = Task(description='Gather relevant data', agent=researcher)
analysis_task = Task(description='Analyze the data', agent=analyst)
writing_task = Task(description='Compose the report', agent=writer)
research_task = Task(description='Gather relevant data...', agent=researcher)
analysis_task = Task(description='Analyze the data...', agent=analyst)
writing_task = Task(description='Compose the report...', agent=writer)
# Form the crew with a sequential process
report_crew = Crew(
@@ -42,9 +52,9 @@ report_crew = Crew(
```
### Workflow in Action
1. **Initial Task**: The first agent completes their task and signals completion.
2. **Subsequent Tasks**: Following agents pick up their tasks in the order defined, using the outcomes of preceding tasks as inputs.
3. **Completion**: The process concludes once the final task is executed, culminating in the project's completion.
1. **Initial Task**: In a sequential process, the first agent completes their task and signals completion.
2. **Subsequent Tasks**: Agents pick up their tasks based on the process type, with outcomes of preceding tasks or manager directives guiding their execution.
3. **Completion**: The process concludes once the final task is executed, leading to project completion.
## Conclusion
The sequential process in CrewAI provides a clear, straightforward path for task execution. It's particularly suited for projects requiring a logical progression of tasks, ensuring each step is completed before the next begins, thereby facilitating a cohesive final product.

View File

@@ -1,29 +1,23 @@
## Telemetry
CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.
CrewAI utilizes anonymous telemetry to gather usage statistics with the primary goal of enhancing the library. Our focus is on improving and developing the features, integrations, and tools most utilized by our users.
There is NO data being collected on the prompts, task descriptions, agents' backstories or goals, tool usage, API calls, responses, any data processed by the agents, or any secrets and environment variables.
It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables.
Data collected includes:
- Version of crewAI
- So we can understand how many users are using the latest version
- Version of Python
- So we can decide on what versions to better support
- General OS (e.g. number of CPUs, macOS/Windows/Linux)
- So we know what OS we should focus on and if we could build specific OS related features
- Number of agents and tasks in a crew
- So we make sure we are testing internally with similar use cases and educate people on the best practices
- Crew Process being used
- Understand where we should focus our efforts
- If Agents are using memory or allowing delegation
- Understand if we improved the features or maybe even drop them
- If Tasks are being executed in parallel or sequentially
- Understand if we should focus more on parallel execution
- Language model being used
- Improved support on most used languages
- Roles of agents in a crew
- Understand high level use cases so we can build better tools, integrations and examples about it
- Tools names available
- Understand out of the publically available tools, which ones are being used the most so we can improve them
### Data Collected Includes:
- **Version of CrewAI**: Assessing the adoption rate of our latest version helps us understand user needs and guide our updates.
- **Python Version**: Identifying the Python versions our users operate with assists in prioritizing our support efforts for these versions.
- **General OS Information**: Details like the number of CPUs and the operating system type (macOS, Windows, Linux) enable us to focus our development on the most used operating systems and explore the potential for OS-specific features.
- **Number of Agents and Tasks in a Crew**: Ensures our internal testing mirrors real-world scenarios, helping us guide users towards best practices.
- **Crew Process Utilization**: Understanding how crews are utilized aids in directing our development focus.
- **Memory and Delegation Use by Agents**: Insights into how these features are used help evaluate their effectiveness and future.
- **Task Execution Mode**: Knowing whether tasks are executed in parallel or sequentially influences our emphasis on enhancing parallel execution capabilities.
- **Language Model Utilization**: Supports our goal to improve support for the most popular languages among our users.
- **Roles of Agents within a Crew**: Understanding the various roles agents play aids in crafting better tools, integrations, and examples.
- **Tool Usage**: Identifying which tools are most frequently used allows us to prioritize improvements in those areas.
Users can opt-in sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews.
### Opt-In Further Telemetry Sharing
Users can choose to share their complete telemetry data by enabling the `share_crew` attribute to `True` in their crew configurations. This opt-in approach respects user privacy and aligns with data protection standards by ensuring users have control over their data sharing preferences.
### Updates and Revisions
We are committed to maintaining the accuracy and transparency of our documentation. Regular reviews and updates are performed to ensure our documentation accurately reflects the latest developments of our codebase and telemetry practices. Users are encouraged to review this section for the most current information on our data collection practices and how they contribute to the improvement of CrewAI.

743
poetry.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
[tool.poetry]
name = "crewai"
version = "0.14.1"
version = "0.16.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
authors = ["Joao Moura <joao@crewai.com>"]
readme = "README.md"
@@ -26,7 +26,11 @@ opentelemetry-sdk = "^1.22.0"
opentelemetry-exporter-otlp-proto-http = "^1.22.0"
instructor = "^0.5.2"
regex = "^2023.12.25"
crewai-tools = "^0.0.6"
crewai-tools = { version = "^0.0.12", optional = true }
click = "^8.1.7"
[tool.poetry.extras]
tools = ["crewai-tools"]
[tool.poetry.group.dev.dependencies]
isort = "^5.13.2"
@@ -41,12 +45,14 @@ mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
mkdocs-material-extensions = "^1.3.1"
pillow = "^10.2.0"
cairosvg = "^2.7.1"
crewai_tools = "^0.0.12"
[tool.isort]
profile = "black"
known_first_party = ["crewai"]
[tool.poetry.scripts]
crewai = "crewai.cli.cli:crewai"
[tool.poetry.group.test.dependencies]
pytest = "^8.0.0"

View File

@@ -1,8 +1,7 @@
import os
import uuid
from typing import Any, List, Optional, Tuple
from typing import Any, Dict, List, Optional, Tuple
from crewai_tools import BaseTool as CrewAITool
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
@@ -23,6 +22,7 @@ from pydantic_core import PydanticCustomError
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
class Agent(BaseModel):
@@ -51,7 +51,9 @@ class Agent(BaseModel):
_logger: Logger = PrivateAttr()
_rpm_controller: RPMController = PrivateAttr(default=None)
_request_within_rpm_limit: Any = PrivateAttr(default=None)
_token_process: TokenProcess = TokenProcess()
formatting_errors: int = 0
model_config = ConfigDict(arbitrary_types_allowed=True)
id: UUID4 = Field(
default_factory=uuid.uuid4,
@@ -66,7 +68,7 @@ class Agent(BaseModel):
description="Maximum number of requests per minute for the agent execution to be respected.",
)
memory: bool = Field(
default=True, description="Whether the agent should have memory or not"
default=False, description="Whether the agent should have memory or not"
)
verbose: bool = Field(
default=False, description="Verbose mode for the Agent Execution"
@@ -123,8 +125,12 @@ class Agent(BaseModel):
return self
@model_validator(mode="after")
def check_agent_executor(self) -> "Agent":
"""Check if the agent executor is set."""
def set_agent_executor(self) -> "Agent":
"""set agent executor is set."""
if hasattr(self.llm, "model_name"):
self.llm.callbacks = [
TokenCalcHandler(self.llm.model_name, self._token_process)
]
if not self.agent_executor:
self.set_cache_handler(self.cache_handler)
return self
@@ -243,20 +249,20 @@ class Agent(BaseModel):
)
bind = self.llm.bind(stop=[self.i18n.slice("observation")])
inner_agent = agent_args | execution_prompt | bind | CrewAgentParser()
inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
self.agent_executor = CrewAgentExecutor(
agent=RunnableAgent(runnable=inner_agent), **executor_args
)
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
"""Parse tools to be used for the task."""
tools_list = []
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_langchain())
else:
tools_list.append(tool)
return tools_list
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
    """Interpolate crew-level inputs into the agent's role, goal, and backstory.

    Args:
        inputs: Mapping of placeholder names to values, applied with
            ``str.format`` to each of the three text fields.

    NOTE(review): ``str.format`` raises KeyError for placeholders missing
    from ``inputs`` and ValueError for stray literal braces in the text —
    confirm callers always pass a complete mapping.
    """
    self.role = self.role.format(**inputs)
    self.goal = self.goal.format(**inputs)
    self.backstory = self.backstory.format(**inputs)
def increment_formatting_errors(self) -> None:
    """Increment the agent's formatting-error counter.

    Called by ``CrewAgentParser`` whenever the LLM output fails to match
    the expected format (e.g. tool call mixed with a final answer).
    """
    self.formatting_errors += 1
def format_log_to_str(
self,
@@ -271,6 +277,22 @@ class Agent(BaseModel):
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
return thoughts
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
    """Parse tools to be used for the task.

    Converts any ``crewai_tools.BaseTool`` instances to their LangChain
    equivalents; all other tools are passed through unchanged. The
    ``crewai_tools`` package is optional, so its import is attempted lazily.

    Args:
        tools: Tools supplied to the agent/task; may mix CrewAI and
            LangChain tools.

    Returns:
        The list of tools with CrewAI tools converted via ``to_langchain()``.
    """
    # Attempt the optional import once, outside the loop. The previous
    # version wrapped the whole loop in the try-block, so when the import
    # failed the except-branch referenced an unbound `tool` variable and
    # dropped every tool but (at most) one.
    try:
        from crewai_tools import BaseTool as CrewAITool
    except ModuleNotFoundError:
        CrewAITool = None

    tools_list = []
    for tool in tools:
        if CrewAITool is not None and isinstance(tool, CrewAITool):
            tools_list.append(tool.to_langchain())
        else:
            tools_list.append(tool)
    return tools_list
@staticmethod
def __tools_names(tools) -> str:
return ", ".join([t.name for t in tools])

View File

@@ -1,4 +1,4 @@
from typing import Union
from typing import Any, Union
from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain_core.agents import AgentAction, AgentFinish
@@ -34,6 +34,7 @@ class CrewAgentParser(ReActSingleInputOutputParser):
"""
_i18n: I18N = I18N()
agent: Any = None
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
@@ -41,6 +42,7 @@ class CrewAgentParser(ReActSingleInputOutputParser):
if includes_tool:
if includes_answer:
self.agent.increment_formatting_errors()
raise OutputParserException(f"{FINAL_ANSWER_AND_TOOL_ERROR_MESSAGE}")
return AgentAction("", "", text)
@@ -52,6 +54,7 @@ class CrewAgentParser(ReActSingleInputOutputParser):
format = self._i18n.slice("format_without_tools")
error = f"{format}"
self.agent.increment_formatting_errors()
raise OutputParserException(
error,
observation=error,

View File

@@ -41,6 +41,7 @@ class Crew(BaseModel):
full_output: Whether the crew should return the full output with all tasks outputs or just the final output.
step_callback: Callback to be executed after each step for every agents execution.
share_crew: Whether you want to share the complete crew infromation and execution with crewAI to make the library better, and allow us to train models.
inputs: Any inputs that the crew will use in tasks or agents, it will be interpolated in promtps.
"""
__hash__ = object.__hash__ # type: ignore
@@ -53,6 +54,10 @@ class Crew(BaseModel):
agents: List[Agent] = Field(default_factory=list)
process: Process = Field(default=Process.sequential)
verbose: Union[int, bool] = Field(default=0)
usage_metrics: Optional[dict] = Field(
default=None,
description="Metrics for the LLM usage during all tasks execution.",
)
full_output: Optional[bool] = Field(
default=False,
description="Whether the crew should return the full output with all tasks outputs or just the final output.",
@@ -63,6 +68,10 @@ class Crew(BaseModel):
function_calling_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
inputs: Optional[Dict[str, Any]] = Field(
description="Any inputs that the crew will use in tasks or agents, it will be interpolated in promtps.",
default={},
)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
share_crew: Optional[bool] = Field(default=False)
@@ -125,6 +134,15 @@ class Crew(BaseModel):
)
return self
@model_validator(mode="after")
def interpolate_inputs(self):
"""Interpolates the inputs in the tasks and agents."""
for task in self.tasks:
task.interpolate_inputs(self.inputs)
for agent in self.agents:
agent.interpolate_inputs(self.inputs)
return self
@model_validator(mode="after")
def check_config(self):
"""Validates that the crew is properly configured with agents and tasks."""
@@ -187,14 +205,27 @@ class Crew(BaseModel):
agent.step_callback = self.step_callback
agent.create_agent_executor()
if self.process == Process.sequential:
return self._run_sequential_process()
if self.process == Process.hierarchical:
return self._run_hierarchical_process()
metrics = []
raise NotImplementedError(
f"The process '{self.process}' is not implemented yet."
)
if self.process == Process.sequential:
result = self._run_sequential_process()
elif self.process == Process.hierarchical:
result, manager_metrics = self._run_hierarchical_process()
metrics.append(manager_metrics)
else:
raise NotImplementedError(
f"The process '{self.process}' is not implemented yet."
)
metrics = metrics + [
agent._token_process.get_summary() for agent in self.agents
]
self.usage_metrics = {
key: sum([m[key] for m in metrics if m is not None]) for key in metrics[0]
}
return result
def _run_sequential_process(self) -> str:
"""Executes tasks sequentially and returns the final output."""
@@ -204,7 +235,8 @@ class Crew(BaseModel):
agents_for_delegation = [
agent for agent in self.agents if agent != task.agent
]
task.tools += AgentTools(agents=agents_for_delegation).tools()
if len(self.agents) > 1 and len(agents_for_delegation) > 0:
task.tools += AgentTools(agents=agents_for_delegation).tools()
role = task.agent.role if task.agent is not None else "None"
self._logger.log("debug", f"Working Agent: {role}")
@@ -247,7 +279,7 @@ class Crew(BaseModel):
)
self._finish_execution(task_output)
return self._format_output(task_output)
return self._format_output(task_output), manager._token_process.get_summary()
def _format_output(self, output: str) -> str:
"""Formats the output of the crew execution."""

View File

@@ -1,6 +1,6 @@
import threading
import uuid
from typing import Any, List, Optional, Type
from typing import Any, Dict, List, Optional, Type
from langchain_openai import ChatOpenAI
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
@@ -20,6 +20,8 @@ class Task(BaseModel):
__hash__ = object.__hash__ # type: ignore
used_tools: int = 0
tools_errors: int = 0
delegations: int = 0
i18n: I18N = I18N()
thread: threading.Thread = None
description: str = Field(description="Description of the actual task.")
@@ -171,6 +173,20 @@ class Task(BaseModel):
tasks_slices = [self.description, output]
return "\n".join(tasks_slices)
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
    """Interpolate crew-level inputs into the task description and expected output.

    Args:
        inputs: Mapping of placeholder names to values, applied with
            ``str.format``. ``expected_output`` is only formatted when set.

    NOTE(review): ``str.format`` raises KeyError for placeholders missing
    from ``inputs`` — confirm callers always supply a complete mapping.
    """
    self.description = self.description.format(**inputs)
    if self.expected_output:
        self.expected_output = self.expected_output.format(**inputs)
def increment_tools_errors(self) -> None:
    """Increment the tools errors counter.

    Called by ``ToolUsage`` whenever a tool lookup, argument parsing, or
    execution fails, so per-task tool-error metrics can be reported.
    """
    self.tools_errors += 1
def increment_delegations(self) -> None:
    """Increment the delegations counter.

    Incremented when a delegation tool ("Delegate work to co-worker" or
    "Ask question to co-worker") is invoked during tool usage.
    """
    self.delegations += 1
def _export_output(self, result: str) -> Any:
exported_result = result
instructions = "I'm gonna convert this raw text into valid JSON."

View File

@@ -58,7 +58,7 @@ class Telemetry:
try:
trace.set_tracer_provider(self.provider)
except Exception:
self.ready = False
pass
def crew_creation(self, crew):
"""Records the creation of a crew."""

View File

@@ -73,11 +73,13 @@ class ToolUsage:
if isinstance(calling, ToolUsageErrorException):
error = calling.message
self._printer.print(content=f"\n\n{error}\n", color="red")
self.task.increment_tools_errors()
return error
try:
tool = self._select_tool(calling.tool_name)
except Exception as e:
error = getattr(e, "message", str(e))
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{error}\n", color="red")
return error
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}\n\n{self._i18n.slice('final_answer_format')}"
@@ -103,7 +105,7 @@ class ToolUsage:
result = self._format_result(result=result)
return result
except Exception:
pass
self.task.increment_tools_errors()
result = self.tools_handler.cache.read(
tool=calling.tool_name, input=calling.arguments
@@ -111,6 +113,12 @@ class ToolUsage:
if not result:
try:
if calling.tool_name in [
"Delegate work to co-worker",
"Ask question to co-worker",
]:
self.task.increment_delegations()
if calling.arguments:
result = tool._run(**calling.arguments)
else:
@@ -125,8 +133,10 @@ class ToolUsage:
error = ToolUsageErrorException(
f'\n{error_message}.\nMoving one then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
).message
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{error_message}\n", color="red")
return error
self.task.increment_tools_errors()
return self.use(calling=calling, tool_string=tool_string)
self.tools_handler.on_tool_use(calling=calling, output=result)
@@ -166,6 +176,7 @@ class ToolUsage:
for tool in self.tools:
if tool.name.lower().strip() == tool_name.lower().strip():
return tool
self.task.increment_tools_errors()
raise Exception(f"Tool '{tool_name}' not found.")
def _render(self) -> str:
@@ -218,6 +229,7 @@ class ToolUsage:
self._run_attempts += 1
if self._run_attempts > self._max_parsing_attempts:
self._telemetry.tool_usage_error(llm=self.llm)
self.task.increment_tools_errors()
self._printer.print(content=f"\n\n{e}\n", color="red")
return ToolUsageErrorException(
f'{self._i18n.errors("tool_usage_error")}\n{self._i18n.slice("format").format(tool_names=self.tools_names)}'

View File

@@ -27,7 +27,9 @@ class PydanticSchemaParser(BaseModel):
field_type = field.annotation
if get_origin(field_type) is list:
list_item_type = get_args(field_type)[0]
if issubclass(list_item_type, BaseModel):
if isinstance(list_item_type, type) and issubclass(
list_item_type, BaseModel
):
nested_schema = self._get_model_schema(list_item_type, depth + 1)
return f"List[\n{nested_schema}\n{' ' * 4 * depth}]"
else:

View File

@@ -0,0 +1,60 @@
from typing import Any, Dict, List
import tiktoken
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult
class TokenProcess:
    """Accumulates LLM token-usage statistics across calls.

    One instance is attached per agent and fed by ``TokenCalcHandler``;
    ``get_summary`` is aggregated by the crew into ``usage_metrics``.
    """

    # Running counters; rebound per-instance on first update.
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def sum_prompt_tokens(self, tokens: int) -> None:
        """Add prompt tokens to the prompt and total counters."""
        self.prompt_tokens += tokens
        self.total_tokens += tokens

    def sum_completion_tokens(self, tokens: int) -> None:
        """Add completion tokens to the completion and total counters."""
        self.completion_tokens += tokens
        self.total_tokens += tokens

    def sum_successful_requests(self, requests: int) -> None:
        """Add to the count of successful LLM requests."""
        self.successful_requests += requests

    def get_summary(self) -> Dict[str, int]:
        """Return a dict snapshot of all counters.

        The return annotation previously said ``-> str``, but a dict has
        always been returned; the annotation is corrected here.
        """
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "successful_requests": self.successful_requests,
        }
class TokenCalcHandler(BaseCallbackHandler):
    """LangChain callback that counts tokens with tiktoken into a TokenProcess.

    Prompt tokens are counted locally at LLM start; completion tokens are
    counted one-per-streamed-token; successful requests are counted at
    LLM end.
    """

    model: str = ""
    token_cost_process: TokenProcess

    def __init__(self, model, token_cost_process):
        self.model = model
        self.token_cost_process = token_cost_process

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Count the tokens of each prompt before the LLM call."""
        # Guard first (fixed: was `== None`, and the encoding lookup ran
        # before the guard, wasting work when there is no counter).
        if self.token_cost_process is None:
            return
        if "gpt" in self.model:
            encoding = tiktoken.encoding_for_model(self.model)
        else:
            # Fallback encoding for non-OpenAI model names.
            encoding = tiktoken.get_encoding("cl100k_base")
        for prompt in prompts:
            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))

    async def on_llm_new_token(self, token: str, **kwargs) -> None:
        """Count each streamed completion token."""
        if self.token_cost_process is None:
            return
        self.token_cost_process.sum_completion_tokens(1)

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Record one successful request when the LLM call finishes."""
        if self.token_cost_process is None:
            return
        self.token_cost_process.sum_successful_requests(1)

View File

@@ -4,11 +4,13 @@ from unittest.mock import patch
import pytest
from langchain.tools import tool
from langchain_core.exceptions import OutputParserException
from langchain_openai import ChatOpenAI
from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
from crewai.agents.executor import CrewAgentExecutor
from crewai.agents.parser import CrewAgentParser
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController
@@ -184,7 +186,7 @@ def test_cache_hitting():
agent=agent,
)
output = agent.execute_task(task)
assert output == "The result of the multiplication of 2 and 6 is 0."
assert output == "0"
read.assert_called_with(
tool="multiplier", input={"first_number": 2, "second_number": 6}
)
@@ -416,7 +418,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
moveon.return_value = True
crew.kickoff()
captured = capsys.readouterr()
assert "Action: get_final_answer" in captured.out
assert "get_final_answer" in captured.out
assert "Max RPM reached, waiting for next minute to start." in captured.out
moveon.assert_called_once()
@@ -576,3 +578,39 @@ def test_agent_function_calling_llm():
crew.kickoff()
private_mock.assert_called()
def test_agent_count_formatting_error():
    """A parse failure must bump the agent's formatting-error counter."""
    from unittest.mock import patch

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        verbose=True,
    )

    parser = CrewAgentParser()
    parser.agent = agent

    with patch.object(Agent, "increment_formatting_errors") as increment_mock:
        malformed = "This text does not match expected formats."
        with pytest.raises(OutputParserException):
            parser.parse(malformed)
        increment_mock.assert_called_once()
def test_agent_llm_uses_token_calc_handler_with_llm_has_model_name():
    """A fresh agent's LLM gets exactly one TokenCalcHandler callback."""
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        verbose=True,
    )

    callbacks = agent.llm.callbacks
    assert len(callbacks) == 1

    handler = callbacks[0]
    assert handler.__class__.__name__ == "TokenCalcHandler"
    assert handler.model == "gpt-4"
    assert handler.token_cost_process.__class__.__name__ == "TokenProcess"

View File

@@ -0,0 +1,983 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are Crew Manager.\nYou are
a seasoned manager with a knack for getting the best out of your team.\nYou
are also known for your ability to delegate work to the right people, and to
ask the right questions to get the best out of your team.\nEven though you don''t
perform tasks by yourself, you have a lot of experience in the field, which
allows you to properly evaluate the work of your team members.\n\nYour personal
goal is: Manage the team to complete the task in the best way possible.I have
access to ONLY the following tools, I can use only these, use one at time:\n\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: - Researcher\nThe
input to this tool should be the coworker, the task you want them to do, and
ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question:
str, context: str) - Ask a specific question to one of the following co-workers:
- Researcher\nThe input to this tool should be the coworker, the question you
have for them, and ALL necessary context to ask the question properly, they
know nothing about the question, so share absolute everything you know, don''t
reference things but instead explain them.\n\nTo use a tool I MUST use the exact
following format:\n\n```\nUse Tool: the tool I wanna use, should be one of [Delegate
work to co-worker, Ask question to co-worker] and absolute all relevant input
and context for using the tool, I must use only one tool at once.\nResult: [result
of the tool]\n```\n\nTo give my final answer I''ll use the exact following format:\n\n```\nFinal
Answer: [my expected final answer, entire content of my most complete final
answer goes here]\n```\nI MUST use these formats, my jobs depends on it!\n\nCurrent
Task: say howdy\nYour final answer must be: Howdy!\n\n Begin! This is VERY important
to you, your job depends on it!\n\n\n"}], "model": "gpt-4", "n": 1, "stop":
["\nResult"], "stream": true, "temperature": 0.0}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '2223'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Use"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Tool"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Delegate"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
work"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
co"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"-worker"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"cow"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"ork"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"er"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Research"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"er"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"task"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
say"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
how"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"context"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
We"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
greet"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
someone"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
friendly"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
informal"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
way"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
The"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
term"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"how"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
casual"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
greeting"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
often"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
used"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
United"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
States"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
particularly"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
southern"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
western"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
regions"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
It"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''s"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
contraction"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"how"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
do"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
you"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
do"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\","},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
and"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
it"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''s"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
equivalent"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
saying"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"hello"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
The"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
task"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
simply"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
say"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
\""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"how"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6R80VTnILcsYYhu6Kcu2nH5g7dc","object":"chat.completion.chunk","created":1709096786,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c62361cb0300dd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Wed, 28 Feb 2024 05:06:26 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=5oZs63dZLAiJVsMY5uQ7ak9HK0tVOst7Zje28KTpk.k-1709096786-1.0-AYuae5CK3ehBFpLq3OlQljPTqek/CaKiflLrbel+Bswjwo8HdfZFsMN5VVlV4hNdqchTvTgAp4x8Ncj1tYT5Qa4=;
path=/; expires=Wed, 28-Feb-24 05:36:26 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=0kgttv029QX2lRGuZX6J5VUNsz5xgHpcA.CpfE8vI7Q-1709096786875-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '372'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299468'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 106ms
x-request-id:
- req_f05460238d0247e8c529eddfd65127a2
status:
code: 200
message: OK
- request:
body: !!binary |
CvMiCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSyiIKEgoQY3Jld2FpLnRl
bGVtZXRyeRKuAQoQYNbXjiQFPD01zFrwu5iogRIIYhFnuzmi6a8qEFRvb2wgVXNhZ2UgRXJyb3Iw
ATm4C6S2eO63F0GYOqS2eO63F0pmCgNsbG0SXwpdeyJuYW1lIjogbnVsbCwgIm1vZGVsX25hbWUi
OiAiZ3B0LTMuNS10dXJiby0wMTI1IiwgInRlbXBlcmF0dXJlIjogMC43LCAiY2xhc3MiOiAiQ2hh
dE9wZW5BSSJ9egIYARKCCAoQmjdDdWLBesNvZKHmPF3B1RIIqxnLgk/uxpUqDENyZXcgQ3JlYXRl
ZDABOXB1prp47rcXQWDFp7p47rcXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuMTQuNEoaCg5weXRo
b25fdmVyc2lvbhIICgYzLjExLjdKMQoHY3Jld19pZBImCiRlYjJjYmMzOC1lYzA3LTQ5ZjctODQ5
Ny1mODRlMGY2NTYzNzJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKFQoNY3Jld19sYW5n
dWFnZRIECgJlbkoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2Zf
YWdlbnRzEgIYAUraAgoLY3Jld19hZ2VudHMSygIKxwJbeyJpZCI6ICIyZDAzNzU3Mi03Mjk3LTQ5
NTktODFkZS04NDMxM2ZhOTJjNjYiLCAicm9sZSI6ICJSZXNlYXJjaGVyIiwgIm1lbW9yeV9lbmFi
bGVkPyI6IGZhbHNlLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBt
IjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjogIntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9u
YW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVyZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0
T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgInRvb2xzX25hbWVzIjog
WyJyZXR1cm5fZGF0YSJdfV1KmQEKCmNyZXdfdGFza3MSigEKhwFbeyJpZCI6ICIwZDIyMDUwNS01
MmZlLTQ2YjUtODc3MS01YWNiM2Y2YjY3YjAiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAi
YWdlbnRfcm9sZSI6ICJSZXNlYXJjaGVyIiwgInRvb2xzX25hbWVzIjogWyJyZXR1cm5fZGF0YSJd
fV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1hcm02NC1hcm0tNjRiaXRKHAoQcGxhdGZvcm1f
cmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1fc3lzdGVtEggKBkRhcndpbkp7ChBwbGF0Zm9y
bV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVyc2lvbiAyMy4zLjA6IFdlZCBEZWMgMjAgMjE6
MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAyLjgxLjV+Ny9SRUxFQVNFX0FSTTY0X1Q2MDMw
SgoKBGNwdXMSAhgMegIYARLHAQoQD+8RIH+t/MvIeYMta48slxIInVQ/6xygtMcqClRvb2wgVXNh
Z2UwATmAMny9eO63F0EwaXy9eO63F0oaCgl0b29sX25hbWUSDQoLcmV0dXJuX2RhdGFKDgoIYXR0
ZW1wdHMSAhgBSlkKA2xsbRJSClB7Im5hbWUiOiBudWxsLCAibW9kZWxfbmFtZSI6ICJncHQtNCIs
ICJ0ZW1wZXJhdHVyZSI6IDAuNywgImNsYXNzIjogIkNoYXRPcGVuQUkifXoCGAES5QcKEBg3Ofbq
+tFCvKV8Oe5JDssSCIJmxSWDDcLTKgxDcmV3IENyZWF0ZWQwATkoT7TAeO63F0FIl7XAeO63F0oa
Cg5jcmV3YWlfdmVyc2lvbhIICgYwLjE0LjRKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43SjEK
B2NyZXdfaWQSJgokZGZkZmM2YTItZjNiOC00MWUzLWE4NzYtNGExOWI3NTFkNDAzShwKDGNyZXdf
cHJvY2VzcxIMCgpzZXF1ZW50aWFsShUKDWNyZXdfbGFuZ3VhZ2USBAoCZW5KGgoUY3Jld19udW1i
ZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxICGAFKzAIKC2NyZXdfYWdl
bnRzErwCCrkCW3siaWQiOiAiODE2NTU5ZTktNDZlOC00NjMwLTg2ZTYtNjM5OTE1MzQ0MzlmIiwg
InJvbGUiOiAiUmVzZWFyY2hlciIsICJtZW1vcnlfZW5hYmxlZD8iOiBmYWxzZSwgInZlcmJvc2U/
IjogZmFsc2UsICJtYXhfaXRlciI6IDE1LCAibWF4X3JwbSI6IG51bGwsICJpMThuIjogImVuIiwg
ImxsbSI6ICJ7XCJuYW1lXCI6IG51bGwsIFwibW9kZWxfbmFtZVwiOiBcImdwdC00XCIsIFwidGVt
cGVyYXR1cmVcIjogMC43LCBcImNsYXNzXCI6IFwiQ2hhdE9wZW5BSVwifSIsICJkZWxlZ2F0aW9u
X2VuYWJsZWQ/IjogdHJ1ZSwgInRvb2xzX25hbWVzIjogW119XUqKAQoKY3Jld190YXNrcxJ8Cnpb
eyJpZCI6ICI1OWI2NzZlYi04Y2U3LTQxZTEtYTA5MC1lZGJmNzRjOWY0ZDciLCAiYXN5bmNfZXhl
Y3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJSZXNlYXJjaGVyIiwgInRvb2xzX25hbWVz
IjogW119XUooCghwbGF0Zm9ybRIcChptYWNPUy0xNC4zLWFybTY0LWFybS02NGJpdEocChBwbGF0
Zm9ybV9yZWxlYXNlEggKBjIzLjMuMEobCg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFyd2luSnsKEHBs
YXRmb3JtX3ZlcnNpb24SZwplRGFyd2luIEtlcm5lbCBWZXJzaW9uIDIzLjMuMDogV2VkIERlYyAy
MCAyMTozMDo1OSBQU1QgMjAyMzsgcm9vdDp4bnUtMTAwMDIuODEuNX43L1JFTEVBU0VfQVJNNjRf
VDYwMzBKCgoEY3B1cxICGAx6AhgBEuYHChBQG5baojgWTTAUUORJsU23EghdLLRb9gcgqCoMQ3Jl
dyBDcmVhdGVkMAE5kAxcwXjutxdBUGRdwXjutxdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC4xNC40
ShoKDnB5dGhvbl92ZXJzaW9uEggKBjMuMTEuN0oxCgdjcmV3X2lkEiYKJDA0YThiNTNiLWJiZDgt
NDZkYy05MDNmLWE0OWI3YzQ4MjU1MkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoVCg1j
cmV3X2xhbmd1YWdlEgQKAmVuShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251
bWJlcl9vZl9hZ2VudHMSAhgBSs0CCgtjcmV3X2FnZW50cxK9Agq6Alt7ImlkIjogImZjYzY1ZWJm
LTlmOTQtNGZkNi04MzYxLTMzMGJkNzVhMGExZCIsICJyb2xlIjogIlJlc2VhcmNoZXIiLCAibWVt
b3J5X2VuYWJsZWQ/IjogZmFsc2UsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwg
Im1heF9ycG0iOiBudWxsLCAiaTE4biI6ICJlbiIsICJsbG0iOiAie1wibmFtZVwiOiBudWxsLCBc
Im1vZGVsX25hbWVcIjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJlXCI6IDAuNywgXCJjbGFzc1wi
OiBcIkNoYXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAidG9vbHNf
bmFtZXMiOiBbXX1dSooBCgpjcmV3X3Rhc2tzEnwKelt7ImlkIjogIjliNTU1NDU5LTVmMjYtNDli
OS1hMTI0LTQ2OTE1MzA0YjljMCIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJhZ2VudF9y
b2xlIjogIlJlc2VhcmNoZXIiLCAidG9vbHNfbmFtZXMiOiBbXX1dSigKCHBsYXRmb3JtEhwKGm1h
Y09TLTE0LjMtYXJtNjQtYXJtLTY0Yml0ShwKEHBsYXRmb3JtX3JlbGVhc2USCAoGMjMuMy4wShsK
D3BsYXRmb3JtX3N5c3RlbRIICgZEYXJ3aW5KewoQcGxhdGZvcm1fdmVyc2lvbhJnCmVEYXJ3aW4g
S2VybmVsIFZlcnNpb24gMjMuMy4wOiBXZWQgRGVjIDIwIDIxOjMwOjU5IFBTVCAyMDIzOyByb290
OnhudS0xMDAwMi44MS41fjcvUkVMRUFTRV9BUk02NF9UNjAzMEoKCgRjcHVzEgIYDHoCGAES4gcK
EOtjiZ481WdO6n81Pn20ytUSCErYNg+QhMMWKgxDcmV3IENyZWF0ZWQwATmg6ufEeO63F0GgYenE
eO63F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjE0LjRKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4x
MS43SjEKB2NyZXdfaWQSJgokNzY5NWRlMGItOWEwOS00MjBmLWFhMjgtYjEyNjBhOGJlZjJmSh4K
DGNyZXdfcHJvY2VzcxIOCgxoaWVyYXJjaGljYWxKFQoNY3Jld19sYW5ndWFnZRIECgJlbkoaChRj
cmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrNAgoL
Y3Jld19hZ2VudHMSvQIKugJbeyJpZCI6ICJjM2NjMjgxNC03OTczLTQxZmUtYTUwMi01MTY4MjA4
MDkwMTYiLCAicm9sZSI6ICJSZXNlYXJjaGVyIiwgIm1lbW9yeV9lbmFibGVkPyI6IGZhbHNlLCAi
dmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjogbnVsbCwgImkxOG4i
OiAiZW4iLCAibGxtIjogIntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9uYW1lXCI6IFwiZ3B0LTRc
IiwgXCJ0ZW1wZXJhdHVyZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0T3BlbkFJXCJ9IiwgImRl
bGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgInRvb2xzX25hbWVzIjogW119XUqEAQoKY3Jld190
YXNrcxJ2CnRbeyJpZCI6ICJmMzVhNjZjMC02YTg4LTQxNjktYTEwMC0wNjIwNTIyNTZiNTUiLCAi
YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJOb25lIiwgInRvb2xzX25h
bWVzIjogW119XUooCghwbGF0Zm9ybRIcChptYWNPUy0xNC4zLWFybTY0LWFybS02NGJpdEocChBw
bGF0Zm9ybV9yZWxlYXNlEggKBjIzLjMuMEobCg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFyd2luSnsK
EHBsYXRmb3JtX3ZlcnNpb24SZwplRGFyd2luIEtlcm5lbCBWZXJzaW9uIDIzLjMuMDogV2VkIERl
YyAyMCAyMTozMDo1OSBQU1QgMjAyMzsgcm9vdDp4bnUtMTAwMDIuODEuNX43L1JFTEVBU0VfQVJN
NjRfVDYwMzBKCgoEY3B1cxICGAx6AhgB
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, br
Connection:
- keep-alive
Content-Length:
- '4470'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.23.0
method: POST
uri: http://telemetry.crewai.com:4318/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Wed, 28 Feb 2024 05:06:31 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Only tools available:\n###\nTool
Name: delegate work to co-worker\nTool Description: Delegate work to co-worker(coworker:
str, task: str, context: str) - Delegate a specific task to one of the following
co-workers: - Researcher\nThe input to this tool should be the coworker, the
task you want them to do, and ALL necessary context to exectue the task, they
know nothing about the task, so share absolute everything you know, don''t reference
things but instead explain them.\nTool Arguments: {''coworker'': {''type'':
''string''}, ''task'': {''type'': ''string''}, ''context'': {''type'': ''string''}}\n--\nTool
Name: ask question to co-worker\nTool Description: Ask question to co-worker(coworker:
str, question: str, context: str) - Ask a specific question to one of the following
co-workers: - Researcher\nThe input to this tool should be the coworker, the
question you have for them, and ALL necessary context to ask the question properly,
they know nothing about the question, so share absolute everything you know,
don''t reference things but instead explain them.\nTool Arguments: {''coworker'':
{''type'': ''string''}, ''question'': {''type'': ''string''}, ''context'': {''type'':
''string''}}\n\nReturn a valid schema for the tool, the tool name must be exactly
equal one of the options, use this text to inform the valid ouput schema:\n\nUse
Tool: Delegate work to co-worker\ncoworker: Researcher\ntask: say howdy\ncontext:
We need to greet someone in a friendly, informal way. The term \"howdy\" is
a casual greeting often used in the United States, particularly in the southern
and western regions. It''s a contraction of \"how do you do\", and it''s equivalent
to saying \"hello\". The task is to simply say \"howdy\".```"}, {"role": "system",
"content": "The schema should have the following structure, only two keys:\n-
tool_name: str\n- arguments: dict (with all arguments being passed)\n\nExample:\n{\"tool_name\":
\"tool name\", \"arguments\": {\"arg_name1\": \"value\", \"arg_name2\": 2}}"}],
"model": "gpt-4", "tool_choice": {"type": "function", "function": {"name": "InstructorToolCalling"}},
"tools": [{"type": "function", "function": {"name": "InstructorToolCalling",
"description": "Correctly extracted `InstructorToolCalling` with all the required
parameters with correct types", "parameters": {"properties": {"tool_name": {"description":
"The name of the tool to be called.", "title": "Tool Name", "type": "string"},
"arguments": {"anyOf": [{"type": "object"}, {"type": "null"}], "description":
"A dictinary of arguments to be passed to the tool.", "title": "Arguments"}},
"required": ["arguments", "tool_name"], "type": "object"}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '2664'
content-type:
- application/json
cookie:
- __cf_bm=5oZs63dZLAiJVsMY5uQ7ak9HK0tVOst7Zje28KTpk.k-1709096786-1.0-AYuae5CK3ehBFpLq3OlQljPTqek/CaKiflLrbel+Bswjwo8HdfZFsMN5VVlV4hNdqchTvTgAp4x8Ncj1tYT5Qa4=;
_cfuvid=0kgttv029QX2lRGuZX6J5VUNsz5xgHpcA.CpfE8vI7Q-1709096786875-0.0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
MTQSACC+b86653L6T7MEBfR1AcgDnkRNvVWsjO20fDadUlUVSv5SnYO15VIdl6oDVYlESGSDMFNS
LEIiPCgkEomtuaXbu4wHxMBNwZ57KADyFRlQuXS5bFp5PNtOvj6T3uRXbERmN69nL2Ty+tn750P/
bviSHhUA6fV/LjOn+mSJQew1QPjO5de4IoP+tDfvzSfTeY94T6MVCxlQ3ebHo8e9SX/IdXWpvuRE
Bv8KADg8bQF07v2IDGSQt3wVhkcG+CWAogqTAbmUfMouZHoEAGnr9hqEToRYlVXlsnQiyukdH9pg
kH7qRC4nH329ir/7g1Kfff0ZXyw3Lzq/6GY6qXhr13ZKrMAoxAjaJqNwDqDgGoO8N2EL9/dp/K4q
z5yID7WwFkAfO0EGdLDB2vZHwTVsycBSxcK1y4yNxhWyotTHG40rjpYe2WClyJYM3FJBKwaWvnLC
QMHUzy6tyOnkdljqptpdZLztbeZ98BcjMFfIijoyZyRtWAPDBzgEXvkR6lGtJdi43RN8XzIyxwbW
WpJfZS3BJzjUKksl7vhQYzbAKHW2IH1G9lt2mdMjtDvgr3TiouxgfIqAx4K8acMpcwyIXHsN6Qne
5LvaRhpy3KO/CV305wNUip12qNRaS1XVZn2+m8A3nU//UM74BwGxDzVjxCIMzLg5l1bwCZj1TSs7
JLfTQK198YklG042nMg58MlVQFs5dyD15GOHkWjdRr1OLq9S3kF0WeNtggwoZW3VzE4FcB6L6114
1aI2atPmy6wrDokMxuOBTERhDxBSvzcFv+kAEAeT8bzopka5II5cLnyoObbRx2ZYnAoDAw==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c6237aee5900dd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- br
Content-Type:
- application/json
Date:
- Wed, 28 Feb 2024 05:06:35 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '4513'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299509'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 98ms
x-request-id:
- req_434efa7f0a5c47ce6c03a8b4ab18dfd7
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re
love to sey howdy.\n\nYour personal goal is: Be super empathetic.To give my
final answer use the exact following format:\n\n```\nFinal Answer: [my expected
final answer, entire content of my most complete final answer goes here]\n```\nI
MUST use these formats, my jobs depends on it!\n\nCurrent Task: say howdy\nThis
is the context you''re working with:\nWe need to greet someone in a friendly,
informal way. The term \"howdy\" is a casual greeting often used in the United
States, particularly in the southern and western regions. It''s a contraction
of \"how do you do\", and it''s equivalent to saying \"hello\". The task is
to simply say \"howdy\".\n\n Begin! This is VERY important to you, your job
depends on it!\n\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nResult"], "stream":
true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '880'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
How"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RHLmZq8uUkVMaJgVLqT5Hq01Uj","object":"chat.completion.chunk","created":1709096795,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c6239af966a4c3-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Wed, 28 Feb 2024 05:06:35 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=rQM_3i5vsgWA7RXqaEYRq1KSxd5JDXRScrpMeiwYZm0-1709096795-1.0-ASrbmefvM3BGg5hzZQYt26xfGYuAKFdDMlTLywyI5oK7E5/BO6bxjYQD+uPFr4MSTydfuvnr7l8LTIz3bxQFE7E=;
path=/; expires=Wed, 28-Feb-24 05:36:35 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=FF7KcQqaDaMEwpz4eGbgI.lNAMAlEVQAWHqsQtW2lzk-1709096795853-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '213'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299801'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 39ms
x-request-id:
- req_7d8573a76975cb05491e81b4321361a8
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are Crew Manager.\nYou are
a seasoned manager with a knack for getting the best out of your team.\nYou
are also known for your ability to delegate work to the right people, and to
ask the right questions to get the best out of your team.\nEven though you don''t
perform tasks by yourself, you have a lot of experience in the field, which
allows you to properly evaluate the work of your team members.\n\nYour personal
goal is: Manage the team to complete the task in the best way possible.I have
access to ONLY the following tools, I can use only these, use one at time:\n\nDelegate
work to co-worker: Delegate work to co-worker(coworker: str, task: str, context:
str) - Delegate a specific task to one of the following co-workers: - Researcher\nThe
input to this tool should be the coworker, the task you want them to do, and
ALL necessary context to exectue the task, they know nothing about the task,
so share absolute everything you know, don''t reference things but instead explain
them.\nAsk question to co-worker: Ask question to co-worker(coworker: str, question:
str, context: str) - Ask a specific question to one of the following co-workers:
- Researcher\nThe input to this tool should be the coworker, the question you
have for them, and ALL necessary context to ask the question properly, they
know nothing about the question, so share absolute everything you know, don''t
reference things but instead explain them.\n\nTo use a tool I MUST use the exact
following format:\n\n```\nUse Tool: the tool I wanna use, should be one of [Delegate
work to co-worker, Ask question to co-worker] and absolute all relevant input
and context for using the tool, I must use only one tool at once.\nResult: [result
of the tool]\n```\n\nTo give my final answer I''ll use the exact following format:\n\n```\nFinal
Answer: [my expected final answer, entire content of my most complete final
answer goes here]\n```\nI MUST use these formats, my jobs depends on it!\n\nCurrent
Task: say howdy\nYour final answer must be: Howdy!\n\n Begin! This is VERY important
to you, your job depends on it!\n\n\nUse Tool: Delegate work to co-worker\ncoworker:
Researcher\ntask: say howdy\ncontext: We need to greet someone in a friendly,
informal way. The term \"howdy\" is a casual greeting often used in the United
States, particularly in the southern and western regions. It''s a contraction
of \"how do you do\", and it''s equivalent to saying \"hello\". The task is
to simply say \"howdy\".\nResult: Howdy!\n\nIf I don''t need to use any more
tools, I must make sure use the correct format to give my final answer:\n\n```Final
Answer: [my expected final answer, entire content of my most complete final
answer goes here]```\n I MUST use these formats, my jobs depends on it!\n"}],
"model": "gpt-4", "n": 1, "stop": ["\nResult"], "stream": true, "temperature":
0.0}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '2889'
content-type:
- application/json
cookie:
- __cf_bm=5oZs63dZLAiJVsMY5uQ7ak9HK0tVOst7Zje28KTpk.k-1709096786-1.0-AYuae5CK3ehBFpLq3OlQljPTqek/CaKiflLrbel+Bswjwo8HdfZFsMN5VVlV4hNdqchTvTgAp4x8Ncj1tYT5Qa4=;
_cfuvid=0kgttv029QX2lRGuZX6J5VUNsz5xgHpcA.CpfE8vI7Q-1709096786875-0.0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
How"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6RIEFlgVwMeEcLBEYUfNyt4TKNo","object":"chat.completion.chunk","created":1709096796,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c623a1696900dd-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Wed, 28 Feb 2024 05:06:36 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '288'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299305'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 138ms
x-request-id:
- req_1000191581c321470f8139624b7cb534
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,128 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re
love to sey howdy.\n\nYour personal goal is: Be super empathetic.To give my
final answer use the exact following format:\n\n```\nFinal Answer: [my expected
final answer, entire content of my most complete final answer goes here]\n```\nI
MUST use these formats, my jobs depends on it!\n\nCurrent Task: say howdy\nYour
final answer must be: Howdy!\n\n Begin! This is VERY important to you, your
job depends on it!\n\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nResult"],
"stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '576'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
How"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6MvBpPA2nu9oAtIveDbDV6gr0jF","object":"chat.completion.chunk","created":1709096525,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c61cfe5aa11abf-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Wed, 28 Feb 2024 05:02:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=t1wx1UUHIo40OZhmzvw5Lfl6GU6rBSOqgVYaroZ_oPY-1709096525-1.0-Adu6235ja9DptuJzagyyjisq3WtY+DAJocWHBAx6XJtDZqQ8F9/xqFdCEsEA1j50+7qghMgFIbQ8zjJTiZZR9jE=;
path=/; expires=Wed, 28-Feb-24 05:32:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=mfFvKXqWOwPiPpWZYg93kKab4M0lyRh10j5DBKVWpZs-1709096525319-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '169'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299875'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 25ms
x-request-id:
- req_bcadd3e1e6d7917b5f4013487e66ce3b
status:
code: 200
message: OK
version: 1

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,128 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are Researcher.\nYou''re
love to sey howdy.\n\nYour personal goal is: Be super empathetic.To give my
final answer use the exact following format:\n\n```\nFinal Answer: [my expected
final answer, entire content of my most complete final answer goes here]\n```\nI
MUST use these formats, my jobs depends on it!\n\nCurrent Task: say howdy\nYour
final answer must be: Howdy!\n\n Begin! This is VERY important to you, your
job depends on it!\n\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nResult"],
"stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '576'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
How"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"dy"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6VCJjr1jrfGEqp8rs8S70MMlNF0","object":"chat.completion.chunk","created":1709097038,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c6298c5f1800f2-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Wed, 28 Feb 2024 05:10:39 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=d2ICjM2qYQhS2M37l0hI5SS.xRz8EjklLB.2h7ZIrfM-1709097039-1.0-AaE39xkM2MDu0fdcAOQjTQKyC+FU657D+YE0JBSEhromKV2iHVf/UfmGInkLBt0CXrhLf3w6il+xyCUhogjNACw=;
path=/; expires=Wed, 28-Feb-24 05:40:39 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=wtrchvMDCF4qH99ZZbYs3XyIWUSjktLudGDSC4IAnro-1709097039247-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '163'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299875'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 25ms
x-request-id:
- req_e37a897b189f6ae6544b520ebcdbf005
status:
code: 200
message: OK
version: 1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,81 +1,21 @@
interactions:
- request:
body: !!binary |
CuwKCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSwwoKEgoQY3Jld2FpLnRl
bGVtZXRyeRLMAQoQwp0SIfudEwsrKbf+lo58IBIIf611tHXnM6YqClRvb2wgVXNhZ2UwATlwwj4a
iF2zF0FImT8aiF2zF0ofCgl0b29sX25hbWUSEgoQZ2V0X2ZpbmFsX2Fuc3dlckoOCghhdHRlbXB0
cxICGAFKWQoDbGxtElIKUHsibmFtZSI6IG51bGwsICJtb2RlbF9uYW1lIjogImdwdC00IiwgInRl
bXBlcmF0dXJlIjogMC43LCAiY2xhc3MiOiAiQ2hhdE9wZW5BSSJ9egIYARLdCAoQOtWPiAfm5nEl
WCn75Vb4mRII6VcVnnypR+gqDENyZXcgQ3JlYXRlZDABOQjIxNKIXbMXQbjyxtKIXbMXShoKDmNy
ZXdhaV92ZXJzaW9uEggKBjAuMTAuMkoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKMQoHY3Jl
d19pZBImCiQ4ZjMyMmYyNS1jYmIyLTRhZmQtOWY1MC03MmRjYWIxOGUzOTlKHAoMY3Jld19wcm9j
ZXNzEgwKCnNlcXVlbnRpYWxKFQoNY3Jld19sYW5ndWFnZRIECgJlbkoaChRjcmV3X251bWJlcl9v
Zl90YXNrcxICGAJKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrKAgoLY3Jld19hZ2VudHMS
ugIKtwJbeyJpZCI6ICI0YzZiNzM0Mi1iZThiLTRiMTItYTQ1Zi0yMDIwNmU0NWQwNTQiLCAicm9s
ZSI6ICJ0ZXN0IHJvbGUiLCAibWVtb3J5X2VuYWJsZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogdHJ1
ZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjog
IntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9uYW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVy
ZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxl
ZD8iOiBmYWxzZSwgInRvb2xzX25hbWVzIjogW119XUqEAgoKY3Jld190YXNrcxL1AQryAVt7Imlk
IjogImJkMGU1OWRhLTc3NDktNDlmMS1iZjEyLWQ2ZjcyMDkyMmZjOSIsICJhc3luY19leGVjdXRp
b24/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogInRlc3Qgcm9sZSIsICJ0b29sc19uYW1lcyI6IFtd
fSwgeyJpZCI6ICJlNWUxNGIwNS0xZmY5LTQ5OTktOWQ4NS04YjdlMzRiZjA0ZDgiLCAiYXN5bmNf
ZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUiLCAidG9vbHNfbmFt
ZXMiOiBbXX1dSigKCHBsYXRmb3JtEhwKGm1hY09TLTE0LjMtYXJtNjQtYXJtLTY0Yml0ShwKEHBs
YXRmb3JtX3JlbGVhc2USCAoGMjMuMy4wShsKD3BsYXRmb3JtX3N5c3RlbRIICgZEYXJ3aW5KewoQ
cGxhdGZvcm1fdmVyc2lvbhJnCmVEYXJ3aW4gS2VybmVsIFZlcnNpb24gMjMuMy4wOiBXZWQgRGVj
IDIwIDIxOjMwOjU5IFBTVCAyMDIzOyByb290OnhudS0xMDAwMi44MS41fjcvUkVMRUFTRV9BUk02
NF9UNjAzMEoKCgRjcHVzEgIYDHoCGAE=
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '1391'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.22.0
method: POST
uri: http://telemetry.crewai.com:4318/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Tue, 13 Feb 2024 08:05:26 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n('''',)\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one
of [], just the name.\nAction Input: Any and all relevant information input
and context for using the tool\nObservation: the result of using the tool\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: just say
hi!\n"}], "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": true,
personal goal is: test goalTo give my final answer use the exact following format:\n\n```\nFinal
Answer: [my expected final answer, entire content of my most complete final
answer goes here]\n```\nI MUST use these formats, my jobs depends on it!\n\nCurrent
Task: just say hi!\n\n Begin! This is VERY important to you, your job depends
on it!\n\n\n"}], "model": "gpt-4", "n": 1, "stop": ["\nResult"], "stream": true,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '904'
- '521'
content-type:
- application/json
host:
@@ -100,71 +40,27 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
string: 'data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Do"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
use"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tool"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
No"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Hi"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri580menKdx2UwVSxcCvbrHE69Ui","object":"chat.completion.chunk","created":1707811526,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: {"id":"chatcmpl-8x6vNrtf0qKpk299ZRwX2pR8oyOZc","object":"chat.completion.chunk","created":1709098661,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
@@ -175,7 +71,7 @@ interactions:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 854b90f5ce86fb28-SJC
- 85c651289e4901c2-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -183,14 +79,14 @@ interactions:
Content-Type:
- text/event-stream
Date:
- Tue, 13 Feb 2024 08:05:26 GMT
- Wed, 28 Feb 2024 05:37:41 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=eZx_Cc28AsZ4sE9XhSDROTXe.zTSX.5NABIk4QNh4rE-1707811526-1-AUSW1VrxOPxZjbDBkaJGjn3RvnxQi2anKBjm3rtF34M+3WVMXKZnsuFT1NyLSbUlKlHLmk+tH0BFBkkjVf1KNAQ=;
path=/; expires=Tue, 13-Feb-24 08:35:26 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=l868Q2j0y1hEh_PyWbaEvyTSK_.xOIvrr6NOCGhTWOs-1709098661-1.0-AcmzcQgeapeNgYWJafurm0jZWKoXZcTElp7cKuollHPUXu89+ZMZ7C+cuD743jI6ZAjoMKnWILI6RpaTc+HijqI=;
path=/; expires=Wed, 28-Feb-24 06:07:41 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=1hOKQMgKuc9NQV1lVNIkVHpksu9kDExwfGmwkHTeUl4-1707811526659-0-604800000;
- _cfuvid=jv_tDbSPpAoMExt5fhPfzf1WpwjAwVWmq6ao6tfaPKg-1709098661891-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -201,9 +97,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '400'
- '381'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -215,151 +111,38 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299796'
- '299889'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 40ms
- 22ms
x-request-id:
- req_1e2e3f72498b1c3f5bdfb527b6808aa3
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: just
say hi!\nAI: Hi!\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false,
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '867'
content-type:
- application/json
cookie:
- __cf_bm=eZx_Cc28AsZ4sE9XhSDROTXe.zTSX.5NABIk4QNh4rE-1707811526-1-AUSW1VrxOPxZjbDBkaJGjn3RvnxQi2anKBjm3rtF34M+3WVMXKZnsuFT1NyLSbUlKlHLmk+tH0BFBkkjVf1KNAQ=;
_cfuvid=1hOKQMgKuc9NQV1lVNIkVHpksu9kDExwfGmwkHTeUl4-1707811526659-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQW0sDMRCF3/dXjHlupVu3F/etKHgBhUIFxUpJs9Pd6CaTJrOoSP+7ZLtt9SWQ
OTmT75yfBEDoQuQgVCVZGVf3p16PZvf2+uVmsZlvJ09XzzR3j3qLlw+WRS86aP2Oig+uc0XG1cia
7F5WHiVj3JpOBpNpmo6G01YwVGAdbaXjftYfjNOLzlGRVhhEDq8JAMBPe0Y2W+CXyGHQO0wMhiBL
FPnxEYDwVMeJkCHowLLj7ERFltG2uIsKoWqMtKBtYN8oDsAVwuwOmKD0iBzvpgfSFgfFY3BkiwCf
miuQEHSMC0txq8+WQnQf7Y6ENZXO0zqmsU1dH+cbbXWoVh5lIBtpApPb23cJwFvbRPMvnHCejOMV
0wfauDDNsv0+cSr9pA67mgQTy/qPa5wlHaEI34HRrDbaluid120xkTPZJb8AAAD//wMADZpmMA8C
AAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 854b91035a06fb28-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 13 Feb 2024 08:05:30 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '1880'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299799'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 40ms
x-request-id:
- req_939914d4d3f3e4fc959d143817d71fbc
- req_c29a9ff74ee32c982c0657b336101735
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
personal goal is: test goalTOOLS:\n------\nYou have access to only the following
tools:\n\n('''',)\n\nTo use a tool, please use the exact following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the tool you wanna use, should be one
of [], just the name.\nAction Input: Any and all relevant information input
and context for using the tool\nObservation: the result of using the tool\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]```This is the summary of your work so far:\nThe human instructs
the AI to greet them, and the AI responds with a simple \"Hi!\"Begin! This is
VERY important to you, your job depends on it!\n\nCurrent Task: just say hello!\nThis
is the context you''re working with:\nHi!\n"}], "model": "gpt-4", "n": 1, "stop":
["\nObservation"], "stream": true, "temperature": 0.7}'
personal goal is: test goalTo give my final answer use the exact following format:\n\n```\nFinal
Answer: [my expected final answer, entire content of my most complete final
answer goes here]\n```\nI MUST use these formats, my jobs depends on it!\n\nCurrent
Task: just say hello!\nThis is the context you''re working with:\nHi!\n\n Begin!
This is VERY important to you, your job depends on it!\n\n\n"}], "model": "gpt-4",
"n": 1, "stop": ["\nResult"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1037'
- '571'
content-type:
- application/json
cookie:
- __cf_bm=eZx_Cc28AsZ4sE9XhSDROTXe.zTSX.5NABIk4QNh4rE-1707811526-1-AUSW1VrxOPxZjbDBkaJGjn3RvnxQi2anKBjm3rtF34M+3WVMXKZnsuFT1NyLSbUlKlHLmk+tH0BFBkkjVf1KNAQ=;
_cfuvid=1hOKQMgKuc9NQV1lVNIkVHpksu9kDExwfGmwkHTeUl4-1707811526659-0-604800000
- __cf_bm=l868Q2j0y1hEh_PyWbaEvyTSK_.xOIvrr6NOCGhTWOs-1709098661-1.0-AcmzcQgeapeNgYWJafurm0jZWKoXZcTElp7cKuollHPUXu89+ZMZ7C+cuD743jI6ZAjoMKnWILI6RpaTc+HijqI=;
_cfuvid=jv_tDbSPpAoMExt5fhPfzf1WpwjAwVWmq6ao6tfaPKg-1709098661891-0.0-604800000
host:
- api.openai.com
user-agent:
@@ -382,71 +165,27 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
string: 'data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Do"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
I"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
need"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
to"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
use"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
a"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
tool"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"?"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
No"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Hello"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8ri5CPtraPfqxGNtaNwyQfaesdGAb","object":"chat.completion.chunk","created":1707811530,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: {"id":"chatcmpl-8x6vOxu9SxkG3t54pHLQT2Pt0iQ63","object":"chat.completion.chunk","created":1709098662,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
@@ -457,7 +196,7 @@ interactions:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 854b91126e42fb28-SJC
- 85c6512e293d01c2-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -465,7 +204,7 @@ interactions:
Content-Type:
- text/event-stream
Date:
- Tue, 13 Feb 2024 08:05:31 GMT
- Wed, 28 Feb 2024 05:37:42 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -477,9 +216,9 @@ interactions:
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
- crewai-iuxna1
openai-processing-ms:
- '360'
- '159'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -491,121 +230,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299765'
- '299876'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 47ms
- 24ms
x-request-id:
- req_9177fada9049cfa726cab195d9a942f5
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\nThe human instructs the AI to greet them, and
the AI responds with a simple \"Hi!\"\n\nNew lines of conversation:\nHuman:
just say hello!\nThis is the context you''re working with:\nHi!\nAI: Hello!\n\nNew
summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1003'
content-type:
- application/json
cookie:
- __cf_bm=eZx_Cc28AsZ4sE9XhSDROTXe.zTSX.5NABIk4QNh4rE-1707811526-1-AUSW1VrxOPxZjbDBkaJGjn3RvnxQi2anKBjm3rtF34M+3WVMXKZnsuFT1NyLSbUlKlHLmk+tH0BFBkkjVf1KNAQ=;
_cfuvid=1hOKQMgKuc9NQV1lVNIkVHpksu9kDExwfGmwkHTeUl4-1707811526659-0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1xRy24bMQy871cwPK8DO4kf8C0NgqaHokDaS1EXhqKld5VIoiBy0wSB/73Qrh9o
LwI0wyGGMx8VALoG14C2M2pD8pNVdvN7r99vv67e7Psjy7ebsLj79PmnfV0o1kXBT89k9ai6tByS
J3UcR9pmMkpl62w5Xa5ms/n11UAEbsgXWZt0cjOZLmbXB0XHzpLgGn5VAAAfw1u8xYbecA3T+ogE
EjEt4fo0BICZfUHQiDhRE0efB9JyVIqD3R8dQdcHE8FF0dxbFdCO4PYLKEObibT8Qw07l0WPXCZJ
HBuBP047MLDBB3exQTCxKSOxhj5xhF2ftaN82u041uD0P/kGH8h7vtjgJR5M7k/XeW5T5qeSROy9
P+E7F51020xGOJZLRDmN8n0F8HtIsf8nGEyZQ9Kt8gtFGcqYj/vwXNiZHSsCQGU1/oxfTZfVwSHK
uyiF7c7FlnLKbgi1+Kz21V8AAAD//wMAt4zwVksCAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 854b911cff76fb28-SJC
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 13 Feb 2024 08:05:35 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- user-z7g4wmlazxqvc5wjyaaaocfz
openai-processing-ms:
- '2599'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299765'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 46ms
x-request-id:
- req_9590d08c9df508c18c924e19e4e0055d
- req_06762f5bedd260cb2740a35c45c78e4e
status:
code: 200
message: OK

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,153 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are Manager.\nYou''re great
at delegating work about scoring.\n\nYour personal goal is: Coordinate scoring
processesTo give my final answer use the exact following format:\n\n```\nFinal
Answer: [my expected final answer, entire content of my most complete final
answer goes here]\n```\nI MUST use these formats, my jobs depends on it!\n\nCurrent
Task: Give me an integer score between 1-5 for the following title: ''The impact
of AI in the future of work''\nYour final answer must be: The score of the title.\n\n
Begin! This is VERY important to you, your job depends on it!\n\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nResult"], "stream": true, "temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '712'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
The"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
score"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
title"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6hAnJdNQ95CMhSACL5TNL0lG6Ws","object":"chat.completion.chunk","created":1709097780,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
'
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85c63ba94ea60110-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Type:
- text/event-stream
Date:
- Wed, 28 Feb 2024 05:23:01 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=AaCQFIZM8yehA4h1745UTRRtL0FczZJtdLfNQ6_8NzA-1709097781-1.0-AUIh6/dxRTiveEa2WnhkSYSTau7hn7cRLNnlSfeiJp2fgTieIadq3fkeBHjqHSnQ7k/pE4WZgIZ9SAAmacifrgc=;
path=/; expires=Wed, 28-Feb-24 05:53:01 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=o.lLAcb8kPLRizp5FDtYBR4rjdIgMyVXhQ_NLWlcuj8-1709097781239-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '224'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299840'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 31ms
x-request-id:
- req_3129f92f1bc422dba1aa396cc072a30e
status:
code: 200
message: OK
version: 1

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -2,12 +2,12 @@ interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are Scorer.\nYou''re an
expert scorer, specialized in scoring titles.\n\nYour personal goal is: Score
the titleTo complete the task you MUST follow the format:\n\n```\nFinal Answer:
[your most complete final answer goes here]\n``` You must use these formats,
my life depends on it.This is the summary of your work so far:\nBegin! This
is VERY important to you, your job depends on it!\n\nCurrent Task: Give me an
integer score between 1-5 for the following title: ''The impact of AI in the
future of work''\nYour final answer must be: The score of the title.\n"}], "model":
the titleTo give my final answer use the exact following format:\n\n```\nFinal
Answer: [my expected final answer, entire content of my most complete final
answer goes here]\n```\nI MUST use these formats, my jobs depends on it!\n\nCurrent
Task: Give me an integer score between 1-5 for the following title: ''The impact
of AI in the future of work''\nYour final answer must be: The score of the title.\n\n
Begin! This is VERY important to you, your job depends on it!\n\n\n"}], "model":
"gpt-4", "n": 1, "stop": ["\nResult"], "stream": true, "temperature": 0.7}'
headers:
accept:
@@ -17,7 +17,7 @@ interactions:
connection:
- keep-alive
content-length:
- '692'
- '707'
content-type:
- application/json
host:
@@ -42,27 +42,96 @@ interactions:
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: 'data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
string: 'data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
Answer"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
The"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
score"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
title"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
''"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
impact"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
AI"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
in"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
the"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
future"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
of"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
work"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"''"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
is"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"4"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8uATyqiZf6iIskukmI84ugDm1H0G9","object":"chat.completion.chunk","created":1708397354,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-8x6vV33CeGSNI1HTLmNHUVUpnH6v9","object":"chat.completion.chunk","created":1709098669,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
data: [DONE]
@@ -73,7 +142,7 @@ interactions:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85836f679bbb1ab1-GRU
- 85c6515c5f1d010f-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -81,14 +150,14 @@ interactions:
Content-Type:
- text/event-stream
Date:
- Tue, 20 Feb 2024 02:49:14 GMT
- Wed, 28 Feb 2024 05:37:50 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=Ru2FKLJKCKm88s_DX8Mq9qJ47KVzLXTzKgnKsJTlABI-1708397354-1.0-AQh3Ie3Pr21iUgelcsLQAfZsyez2JIV9depVidctxAY5ZSSNr2vj5B7ht1meQxkc4krOrDbgy9Ljf5p74nOCQlc=;
path=/; expires=Tue, 20-Feb-24 03:19:14 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=yJJOvK.mptdBnSOedMJ87OggCnwj18oNe7zJy.U.wLg-1709098670-1.0-ARWeX/RQGrEmYpidT4d8JhgrjaWyqwMX0lE4/NuXV6wQOwk42JIR5NEmoP9Ne95YgKLnPHrlShLb4NLnDwZ1/6E=;
path=/; expires=Wed, 28-Feb-24 06:07:50 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=vnpvno_JVcoaEfWoEMif6lomt7v63tB2n2IhYZJ5lT8-1708397354882-0.0-604800000;
- _cfuvid=jQ_UFtYKPdfJP2SPmfRNzj8gnrGG.JmYddiPcNvV0jU-1709098670103-0.0-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
@@ -101,7 +170,7 @@ interactions:
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '231'
- '320'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -113,285 +182,20 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299845'
- '299842'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 31ms
x-request-id:
- req_efeb5a1bdc189fd4a22c59e6c733c8cd
- req_b38a6691b43288fba573ff6677ae6769
status:
code: 200
message: OK
- request:
body: !!binary |
Cs04CiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSpDgKEgoQY3Jld2FpLnRl
bGVtZXRyeRLdBwoQIVLkQNO9Zsy8V9JTBCR+bhIIpjD0t3O28r0qDENyZXcgQ3JlYXRlZDABOUhU
wGZXcrUXQdibxWZXcrUXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuMTQuMEoaCg5weXRob25fdmVy
c2lvbhIICgYzLjExLjdKMQoHY3Jld19pZBImCiRiMTk1ODhmMC0xMzg0LTQ0ZjYtYTE1NS05NWQ4
ZTU0NDcyYWJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKFQoNY3Jld19sYW5ndWFnZRIE
CgJlbkoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRz
EgIYAUrIAgoLY3Jld19hZ2VudHMSuAIKtQJbeyJpZCI6ICIxMTBlZDkwOS05ZDI4LTQ2ZDctYjBl
My03Yzg0M2RhZTE3N2UiLCAicm9sZSI6ICJTY29yZXIiLCAibWVtb3J5X2VuYWJsZWQ/IjogdHJ1
ZSwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDE1LCAibWF4X3JwbSI6IG51bGwsICJp
MThuIjogImVuIiwgImxsbSI6ICJ7XCJuYW1lXCI6IG51bGwsIFwibW9kZWxfbmFtZVwiOiBcImdw
dC00XCIsIFwidGVtcGVyYXR1cmVcIjogMC43LCBcImNsYXNzXCI6IFwiQ2hhdE9wZW5BSVwifSIs
ICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJ0b29sc19uYW1lcyI6IFtdfV1KhgEKCmNy
ZXdfdGFza3MSeAp2W3siaWQiOiAiODQ5Yjg0YzQtMDMyNS00MmUzLWIzZDctNDgwODdlMDFjY2U2
IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiU2NvcmVyIiwgInRv
b2xzX25hbWVzIjogW119XUooCghwbGF0Zm9ybRIcChptYWNPUy0xNC4zLWFybTY0LWFybS02NGJp
dEocChBwbGF0Zm9ybV9yZWxlYXNlEggKBjIzLjMuMEobCg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFy
d2luSnsKEHBsYXRmb3JtX3ZlcnNpb24SZwplRGFyd2luIEtlcm5lbCBWZXJzaW9uIDIzLjMuMDog
V2VkIERlYyAyMCAyMTozMDo1OSBQU1QgMjAyMzsgcm9vdDp4bnUtMTAwMDIuODEuNX43L1JFTEVB
U0VfQVJNNjRfVDYwMzBKCgoEY3B1cxICGAx6AhgBEt0HChDN6oSR2AQZEy+pmKLHoDwxEgh2vJ1J
wXzTYyoMQ3JldyBDcmVhdGVkMAE5IH8ealdytRdBiDggaldytRdKGgoOY3Jld2FpX3ZlcnNpb24S
CAoGMC4xNC4wShoKDnB5dGhvbl92ZXJzaW9uEggKBjMuMTEuN0oxCgdjcmV3X2lkEiYKJGIyZjFh
YjExLWJiNzEtNDVjYS1iOGZlLWVmOWIzMjk0MWM5ZEocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVu
dGlhbEoVCg1jcmV3X2xhbmd1YWdlEgQKAmVuShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUob
ChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSsgCCgtjcmV3X2FnZW50cxK4Agq1Alt7ImlkIjog
IjE4ZDIwMDdmLWU1YjgtNDA0OC05NzNjLTNkZDA4N2UzOWZmOCIsICJyb2xlIjogIlNjb3JlciIs
ICJtZW1vcnlfZW5hYmxlZD8iOiB0cnVlLCAidmVyYm9zZT8iOiBmYWxzZSwgIm1heF9pdGVyIjog
MTUsICJtYXhfcnBtIjogbnVsbCwgImkxOG4iOiAiZW4iLCAibGxtIjogIntcIm5hbWVcIjogbnVs
bCwgXCJtb2RlbF9uYW1lXCI6IFwiZ3B0LTRcIiwgXCJ0ZW1wZXJhdHVyZVwiOiAwLjcsIFwiY2xh
c3NcIjogXCJDaGF0T3BlbkFJXCJ9IiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgInRv
b2xzX25hbWVzIjogW119XUqGAQoKY3Jld190YXNrcxJ4CnZbeyJpZCI6ICI0ODBhNmVlOS1jOGVi
LTQyOGItOGM1ZS03OWQwNGJmNjA3MGQiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdl
bnRfcm9sZSI6ICJTY29yZXIiLCAidG9vbHNfbmFtZXMiOiBbXX1dSigKCHBsYXRmb3JtEhwKGm1h
Y09TLTE0LjMtYXJtNjQtYXJtLTY0Yml0ShwKEHBsYXRmb3JtX3JlbGVhc2USCAoGMjMuMy4wShsK
D3BsYXRmb3JtX3N5c3RlbRIICgZEYXJ3aW5KewoQcGxhdGZvcm1fdmVyc2lvbhJnCmVEYXJ3aW4g
S2VybmVsIFZlcnNpb24gMjMuMy4wOiBXZWQgRGVjIDIwIDIxOjMwOjU5IFBTVCAyMDIzOyByb290
OnhudS0xMDAwMi44MS41fjcvUkVMRUFTRV9BUk02NF9UNjAzMEoKCgRjcHVzEgIYDHoCGAES1QgK
EPP7YsejqlFJITpRoPvrVfYSCK0gcBCCXLLZKgxDcmV3IENyZWF0ZWQwATmgcWxtV3K1F0EQAG5t
V3K1F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjE0LjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4x
MS43SjEKB2NyZXdfaWQSJgokZmY0ZWMyYmUtZmEyYy00ZWFiLWFhMDktOWE2MDVhN2E3YWYzShwK
DGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShUKDWNyZXdfbGFuZ3VhZ2USBAoCZW5KGgoUY3Jl
d19udW1iZXJfb2ZfdGFza3MSAhgCShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxICGAFKyAIKC2Ny
ZXdfYWdlbnRzErgCCrUCW3siaWQiOiAiOTU1Zjg3ZTktZTdkZC00OTQwLTk0MzYtODU3YzE3ZDFi
YTFiIiwgInJvbGUiOiAiU2NvcmVyIiwgIm1lbW9yeV9lbmFibGVkPyI6IHRydWUsICJ2ZXJib3Nl
PyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9ycG0iOiBudWxsLCAiaTE4biI6ICJlbiIs
ICJsbG0iOiAie1wibmFtZVwiOiBudWxsLCBcIm1vZGVsX25hbWVcIjogXCJncHQtNFwiLCBcInRl
bXBlcmF0dXJlXCI6IDAuNywgXCJjbGFzc1wiOiBcIkNoYXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlv
bl9lbmFibGVkPyI6IGZhbHNlLCAidG9vbHNfbmFtZXMiOiBbXX1dSv4BCgpjcmV3X3Rhc2tzEu8B
CuwBW3siaWQiOiAiMGUwMjcwZTEtMWYzMS00NGU1LTkyZjgtYzE3Zjg3MzNkOTVlIiwgImFzeW5j
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiU2NvcmVyIiwgInRvb2xzX25hbWVz
IjogW119LCB7ImlkIjogImMxNGU0YzM1LWZlN2ItNDlmMC1hNjQ5LTRkMmI4MjBjNzkzNSIsICJh
c3luY19leGVjdXRpb24/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogIlNjb3JlciIsICJ0b29sc19u
YW1lcyI6IFtdfV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1hcm02NC1hcm0tNjRiaXRKHAoQ
cGxhdGZvcm1fcmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1fc3lzdGVtEggKBkRhcndpbkp7
ChBwbGF0Zm9ybV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVyc2lvbiAyMy4zLjA6IFdlZCBE
ZWMgMjAgMjE6MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAyLjgxLjV+Ny9SRUxFQVNFX0FS
TTY0X1Q2MDMwSgoKBGNwdXMSAhgMegIYARLVCAoQMhmG/09mSNoVDKMhdcK3LhIIeUQ8qBJuu4Uq
DENyZXcgQ3JlYXRlZDABOdC+H3RXcrUXQWCbIXRXcrUXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAu
MTQuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKMQoHY3Jld19pZBImCiRlNTFhNmY3Ny05
Njc0LTQ5Y2ItODU1OC00ODZkNDc3MmVhMmFKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxK
FQoNY3Jld19sYW5ndWFnZRIECgJlbkoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAJKGwoVY3Jl
d19udW1iZXJfb2ZfYWdlbnRzEgIYAUrIAgoLY3Jld19hZ2VudHMSuAIKtQJbeyJpZCI6ICI2MDZl
YjM5YS02NmZiLTRiMWItYjljNC00NDk2MGJiYjA3ZjEiLCAicm9sZSI6ICJTY29yZXIiLCAibWVt
b3J5X2VuYWJsZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDE1LCAi
bWF4X3JwbSI6IG51bGwsICJpMThuIjogImVuIiwgImxsbSI6ICJ7XCJuYW1lXCI6IG51bGwsIFwi
bW9kZWxfbmFtZVwiOiBcImdwdC00XCIsIFwidGVtcGVyYXR1cmVcIjogMC43LCBcImNsYXNzXCI6
IFwiQ2hhdE9wZW5BSVwifSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJ0b29sc19u
YW1lcyI6IFtdfV1K/gEKCmNyZXdfdGFza3MS7wEK7AFbeyJpZCI6ICI2OGQzZjQyYS1jZmJhLTQ5
MGEtODNjNi0wYjNhMTZhNjdkYmIiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRf
cm9sZSI6ICJTY29yZXIiLCAidG9vbHNfbmFtZXMiOiBbXX0sIHsiaWQiOiAiNzQ0ZThlZDktMTc3
YS00NGQ1LWFiYTMtZmI0OTZmMzQ0MWM3IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImFn
ZW50X3JvbGUiOiAiU2NvcmVyIiwgInRvb2xzX25hbWVzIjogW119XUooCghwbGF0Zm9ybRIcChpt
YWNPUy0xNC4zLWFybTY0LWFybS02NGJpdEocChBwbGF0Zm9ybV9yZWxlYXNlEggKBjIzLjMuMEob
Cg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFyd2luSnsKEHBsYXRmb3JtX3ZlcnNpb24SZwplRGFyd2lu
IEtlcm5lbCBWZXJzaW9uIDIzLjMuMDogV2VkIERlYyAyMCAyMTozMDo1OSBQU1QgMjAyMzsgcm9v
dDp4bnUtMTAwMDIuODEuNX43L1JFTEVBU0VfQVJNNjRfVDYwMzBKCgoEY3B1cxICGAx6AhgBEt0H
ChAdP1n30tByxunJByeYqn3MEgijNV4D7oizKSoMQ3JldyBDcmVhdGVkMAE5KPPieVdytRdBqKjk
eVdytRdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC4xNC4wShoKDnB5dGhvbl92ZXJzaW9uEggKBjMu
MTEuN0oxCgdjcmV3X2lkEiYKJDViNjk3YjZiLThmZmUtNDgxYy1hNzViLThiYmJkYjFlNjBhY0oc
CgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoVCg1jcmV3X2xhbmd1YWdlEgQKAmVuShoKFGNy
ZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSsgCCgtj
cmV3X2FnZW50cxK4Agq1Alt7ImlkIjogIjQ4NzQzZGFhLTk5OWYtNDkwYy04YjlhLTFiODFlODBi
MjRhYiIsICJyb2xlIjogIlNjb3JlciIsICJtZW1vcnlfZW5hYmxlZD8iOiB0cnVlLCAidmVyYm9z
ZT8iOiBmYWxzZSwgIm1heF9pdGVyIjogMTUsICJtYXhfcnBtIjogbnVsbCwgImkxOG4iOiAiZW4i
LCAibGxtIjogIntcIm5hbWVcIjogbnVsbCwgXCJtb2RlbF9uYW1lXCI6IFwiZ3B0LTRcIiwgXCJ0
ZW1wZXJhdHVyZVwiOiAwLjcsIFwiY2xhc3NcIjogXCJDaGF0T3BlbkFJXCJ9IiwgImRlbGVnYXRp
b25fZW5hYmxlZD8iOiBmYWxzZSwgInRvb2xzX25hbWVzIjogW119XUqGAQoKY3Jld190YXNrcxJ4
CnZbeyJpZCI6ICJmZjA4MDJhNS01YTRiLTRiZTgtYmUwMy1hZWEwNDU5ODYxZmMiLCAiYXN5bmNf
ZXhlY3V0aW9uPyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJTY29yZXIiLCAidG9vbHNfbmFtZXMi
OiBbXX1dSigKCHBsYXRmb3JtEhwKGm1hY09TLTE0LjMtYXJtNjQtYXJtLTY0Yml0ShwKEHBsYXRm
b3JtX3JlbGVhc2USCAoGMjMuMy4wShsKD3BsYXRmb3JtX3N5c3RlbRIICgZEYXJ3aW5KewoQcGxh
dGZvcm1fdmVyc2lvbhJnCmVEYXJ3aW4gS2VybmVsIFZlcnNpb24gMjMuMy4wOiBXZWQgRGVjIDIw
IDIxOjMwOjU5IFBTVCAyMDIzOyByb290OnhudS0xMDAwMi44MS41fjcvUkVMRUFTRV9BUk02NF9U
NjAzMEoKCgRjcHVzEgIYDHoCGAES3QcKEHqFqLVvwPedeuAmoET/+Q4SCJmZkPJevJVMKgxDcmV3
IENyZWF0ZWQwATkQUxV9V3K1F0G4Kxd9V3K1F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjE0LjBK
GgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43SjEKB2NyZXdfaWQSJgokZWZhYzA0NWQtOWYxNS00
MzM1LWE4MmMtZGQ1ZmMxMDU3MDQ4ShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShUKDWNy
ZXdfbGFuZ3VhZ2USBAoCZW5KGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVt
YmVyX29mX2FnZW50cxICGAFKyAIKC2NyZXdfYWdlbnRzErgCCrUCW3siaWQiOiAiNzdmODRiM2It
MzczZi00MDQ5LTg4ZmYtMTJiZTQ4MDVjOGQzIiwgInJvbGUiOiAiU2NvcmVyIiwgIm1lbW9yeV9l
bmFibGVkPyI6IHRydWUsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9y
cG0iOiBudWxsLCAiaTE4biI6ICJlbiIsICJsbG0iOiAie1wibmFtZVwiOiBudWxsLCBcIm1vZGVs
X25hbWVcIjogXCJncHQtNFwiLCBcInRlbXBlcmF0dXJlXCI6IDAuNywgXCJjbGFzc1wiOiBcIkNo
YXRPcGVuQUlcIn0iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAidG9vbHNfbmFtZXMi
OiBbXX1dSoYBCgpjcmV3X3Rhc2tzEngKdlt7ImlkIjogImIwOTgxNjI1LWE0YjQtNDcyNi1hODVm
LTQ1NzcxNzg5NjQzMSIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJhZ2VudF9yb2xlIjog
IlNjb3JlciIsICJ0b29sc19uYW1lcyI6IFtdfV1KKAoIcGxhdGZvcm0SHAoabWFjT1MtMTQuMy1h
cm02NC1hcm0tNjRiaXRKHAoQcGxhdGZvcm1fcmVsZWFzZRIICgYyMy4zLjBKGwoPcGxhdGZvcm1f
c3lzdGVtEggKBkRhcndpbkp7ChBwbGF0Zm9ybV92ZXJzaW9uEmcKZURhcndpbiBLZXJuZWwgVmVy
c2lvbiAyMy4zLjA6IFdlZCBEZWMgMjAgMjE6MzA6NTkgUFNUIDIwMjM7IHJvb3Q6eG51LTEwMDAy
LjgxLjV+Ny9SRUxFQVNFX0FSTTY0X1Q2MDMwSgoKBGNwdXMSAhgMegIYARLdBwoQMGkaO8s71aTo
ahUeMT5bhxIIrhxP4gFLdo0qDENyZXcgQ3JlYXRlZDABObi8VYBXcrUXQVhDV4BXcrUXShoKDmNy
ZXdhaV92ZXJzaW9uEggKBjAuMTQuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKMQoHY3Jl
d19pZBImCiRhYjA2MGQ3ZS0zYzYxLTRkN2UtYWYwMS01ZjE1OTdmYzhkNDdKHAoMY3Jld19wcm9j
ZXNzEgwKCnNlcXVlbnRpYWxKFQoNY3Jld19sYW5ndWFnZRIECgJlbkoaChRjcmV3X251bWJlcl9v
Zl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrIAgoLY3Jld19hZ2VudHMS
uAIKtQJbeyJpZCI6ICIxODBlOTQ4Yi1kZTBjLTRhNmYtYTI2Zi0zYTVmOWI2NWViNGYiLCAicm9s
ZSI6ICJTY29yZXIiLCAibWVtb3J5X2VuYWJsZWQ/IjogdHJ1ZSwgInZlcmJvc2U/IjogZmFsc2Us
ICJtYXhfaXRlciI6IDE1LCAibWF4X3JwbSI6IG51bGwsICJpMThuIjogImVuIiwgImxsbSI6ICJ7
XCJuYW1lXCI6IG51bGwsIFwibW9kZWxfbmFtZVwiOiBcImdwdC00XCIsIFwidGVtcGVyYXR1cmVc
IjogMC43LCBcImNsYXNzXCI6IFwiQ2hhdE9wZW5BSVwifSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/
IjogZmFsc2UsICJ0b29sc19uYW1lcyI6IFtdfV1KhgEKCmNyZXdfdGFza3MSeAp2W3siaWQiOiAi
NDUxZGNhOGEtZjUzYy00YjM3LTkxMzUtZGU4MmVkNTRkMDViIiwgImFzeW5jX2V4ZWN1dGlvbj8i
OiBmYWxzZSwgImFnZW50X3JvbGUiOiAiU2NvcmVyIiwgInRvb2xzX25hbWVzIjogW119XUooCghw
bGF0Zm9ybRIcChptYWNPUy0xNC4zLWFybTY0LWFybS02NGJpdEocChBwbGF0Zm9ybV9yZWxlYXNl
EggKBjIzLjMuMEobCg9wbGF0Zm9ybV9zeXN0ZW0SCAoGRGFyd2luSnsKEHBsYXRmb3JtX3ZlcnNp
b24SZwplRGFyd2luIEtlcm5lbCBWZXJzaW9uIDIzLjMuMDogV2VkIERlYyAyMCAyMTozMDo1OSBQ
U1QgMjAyMzsgcm9vdDp4bnUtMTAwMDIuODEuNX43L1JFTEVBU0VfQVJNNjRfVDYwMzBKCgoEY3B1
cxICGAx6AhgB
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, br
Connection:
- keep-alive
Content-Length:
- '7248'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.22.0
method: POST
uri: http://telemetry.crewai.com:4318/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Tue, 20 Feb 2024 02:49:15 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: Give
me an integer score between 1-5 for the following title: ''The impact of AI
in the future of work''\nYour final answer must be: The score of the title.\nAI:
4\n\nNew summary:"}], "model": "gpt-4", "n": 1, "stream": false, "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, br
connection:
- keep-alive
content-length:
- '1007'
content-type:
- application/json
cookie:
- __cf_bm=Ru2FKLJKCKm88s_DX8Mq9qJ47KVzLXTzKgnKsJTlABI-1708397354-1.0-AQh3Ie3Pr21iUgelcsLQAfZsyez2JIV9depVidctxAY5ZSSNr2vj5B7ht1meQxkc4krOrDbgy9Ljf5p74nOCQlc=;
_cfuvid=vnpvno_JVcoaEfWoEMif6lomt7v63tB2n2IhYZJ5lT8-1708397354882-0.0-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.12.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.12.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
IfQIACBW6qxXU+1cmNXlgWpCR5FjmU4D/ueBc4LbIgJH0Zi4gQ2H0wFnPERPfDvX1nq8GsfCG+m3
vwIgsycJUkkUVGHzwbraXv9cnBdKbZMvzfc3pV1e/Zxdn7O5SqkvAOJdqlWQJIYpBprhEgJULt/S
e5KYrMbr2WY1WyyY3xS81zlJ0MGGwXwwXk5mUpsJG6U9STwJAPg9VQCae98hiXGfCmRhiCSBfwLI
ca5JgiLvjQ9RGagPyLWta5Og60RjQMR65DM/42PX9hKB4RU7TZsKJuQaretEI5umGWN7iae0vh9X
oXIaHOOTXdYCl4jgVZTTI5PBYohrzWC/ANYEDMZy+nxItpD/Rl3lfLCOd54kyirPjQUaxRXeHuUa
Iwnyga0K818AL84rV/7YIOu4sOEtcKZLHwNsswoUpxyCmO3ctwrkeMZ0sha3JKl2f8dbbMqDdtYZ
XwbLKs/FvzAAAw==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85836f6d08eb1ab1-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- br
Content-Type:
- application/json
Date:
- Tue, 20 Feb 2024 02:49:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1797'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '300000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299764'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 47ms
x-request-id:
- req_2d47c96f83cd1d7f135dc5556fc7043a
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "4"}], "model": "gpt-4", "tool_choice":
body: '{"messages": [{"role": "user", "content": "The score of the title ''The
impact of AI in the future of work'' is 4."}, {"role": "system", "content":
"I''m gonna convert this raw text into valid JSON."}], "model": "gpt-4", "tool_choice":
{"type": "function", "function": {"name": "ScoreOutput"}}, "tools": [{"type":
"function", "function": {"name": "ScoreOutput", "description": "Correctly extracted
`ScoreOutput` with all the required parameters with correct types", "parameters":
@@ -405,12 +209,12 @@ interactions:
connection:
- keep-alive
content-length:
- '435'
- '586'
content-type:
- application/json
cookie:
- __cf_bm=Ru2FKLJKCKm88s_DX8Mq9qJ47KVzLXTzKgnKsJTlABI-1708397354-1.0-AQh3Ie3Pr21iUgelcsLQAfZsyez2JIV9depVidctxAY5ZSSNr2vj5B7ht1meQxkc4krOrDbgy9Ljf5p74nOCQlc=;
_cfuvid=vnpvno_JVcoaEfWoEMif6lomt7v63tB2n2IhYZJ5lT8-1708397354882-0.0-604800000
- __cf_bm=yJJOvK.mptdBnSOedMJ87OggCnwj18oNe7zJy.U.wLg-1709098670-1.0-ARWeX/RQGrEmYpidT4d8JhgrjaWyqwMX0lE4/NuXV6wQOwk42JIR5NEmoP9Ne95YgKLnPHrlShLb4NLnDwZ1/6E=;
_cfuvid=jQ_UFtYKPdfJP2SPmfRNzj8gnrGG.JmYddiPcNvV0jU-1709098670103-0.0-604800000
host:
- api.openai.com
user-agent:
@@ -434,18 +238,18 @@ interactions:
response:
body:
string: !!binary |
IRwLACBG01VPU+1snTOje1ElnIIn/z51E1watMf5mNqmL92CYRfI1BJNIKCybJ2L2x16PbIm3Psr
AC5KSlDN3yplg2mM67WrTm//WJmrLdct5+X5bTteL4960/NFzUIA9O9LraqeeNPFQF14B0FOLj/S
JSU6o/a4Nxn1BiPhFetLbSjBWaga/UZ72On1Opz7hdKZEg8CAH5LA4Dv3nco0S5aES8MgRL4EsDk
jaYE33Je5OrNVSwAmbB1oxKuNkY4VXlvXtSbMeRQDv7WM4v065sxL5Pt+VV3/Wzn4CrOt+q1td3b
w0ObzA0npJ75DoNCPTAKMaisShKGALo3O6GnLpRP+qSuQl2pMACa+HtK8PfRAY/Myif9SIn+o/un
6tp/Ucee6PF/ink0fhaSf88LhjNqHfByQ9gIJZgrH2jMfwE8WbJcG+cMQ/I2VC+VX2mXKTHsqgS4
ZzIAbJIyPzyQEWmjjhgiwSgCCy/ThZvpFNLCqrL4FwM=
ISALACBGs1lPU+1sHVPKF/OzKPbAJT51Krhk0B4UGlOz6RPdgsFcdLfVVrQCBWqtapcf93doJOi+
1W3v3QqA05wSzCZJzLRT1cGqt3jrnr+U2WN3Es+z6JPLcbf75EynbLMiANp0VmRxJl4LMVCn1kCQ
m8sfFTklmv3GsDEc9PpN4wva5oWiBMcuVjvVRq9Jit6JvxYo8SkAYNsaAfzufZcSjcooEoUhUAK/
BNBbVVCCSQjTEBMTWQHk0tZNSphSKeNctFb9ZolS4lANbvu5Q/protRvS+X/p/Gk/7G5vemGx+uD
h7OHcnjb0ITombVbFBqBUYpBbXVSMALQJHpDn3rMrC9uy+jKSGEAdPHvKcHtlwG+GDLriy9KdL7M
ntSNe9HHvuXxvcQyKjt23qbhwHBmrQN+3xA2RgmGaJ2MtRfAtyerpXOeofNWu/gb7bwwgRLDHiXA
ZyYPgEvuDw/kTGaz0RVrJJhG4OR3NDXjwjs/dass9sIAAw==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 85836f7b6ea71ab1-GRU
- 85c65165bd32010f-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -455,7 +259,7 @@ interactions:
Content-Type:
- application/json
Date:
- Tue, 20 Feb 2024 02:49:18 GMT
- Wed, 28 Feb 2024 05:37:51 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -469,7 +273,7 @@ interactions:
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '679'
- '561'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -481,13 +285,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '299981'
- '299951'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 3ms
- 9ms
x-request-id:
- req_b225a2bd54cb8285069537b2c3ed5510
- req_c8695af2a256e18f279f7a64f4ed3b1c
status:
code: 200
message: OK

View File

@@ -131,7 +131,7 @@ def test_crew_creation():
assert (
crew.kickoff()
== '1. "The Role of AI in Predicting and Managing Pandemics"\nHighlight: \nIn an era where global health crises can emerge from any corner of the world, the role of AI in predicting and managing pandemics has never been more critical. Through intelligent data gathering and predictive analytics, AI can potentially identify the onset of pandemics before they reach critical mass, offering a proactive solution to a reactive problem. This article explores the intersection of AI and epidemiology, delving into how this cutting-edge technology is revolutionizing our approach to global health crises.\n\n2. "AI and the Future of Work: Will Robots Take Our Jobs?"\nHighlight: \nThe rise of AI has sparked both excitement and apprehension about the future of work. Will robots replace us, or will they augment our capabilities? This article delves into the heart of this controversial issue, examining the potential of AI to disrupt job markets, transform industries, and redefine the concept of work. It\'s not just a question of job security—it\'s a discussion about the kind of world we want to live in.\n\n3. "AI in Art and Creativity: A New Frontier in Innovation"\nHighlight: \nArt and creativity, once seen as the exclusive domain of human expression, are being redefined by the advent of AI. From algorithmic compositions to AI-assisted design, this article explores the burgeoning field of AI in art and creativity. It\'s a journey into a new frontier of innovation, one where the lines between human creativity and artificial intelligence blur into an exciting, uncharted territory.\n\n4. "Ethics in AI: Balancing Innovation with Responsibility"\nHighlight: \nAs AI continues to permeate every facet of our lives, questions about its ethical implications grow louder. This article invites readers into a thoughtful exploration of the moral landscape of AI. 
It challenges us to balance the relentless pursuit of innovation with the weighty responsibilities that come with it, asking: How can we harness the power of AI without losing sight of our human values?\n\n5. "AI in Education: Personalizing Learning for the Next Generation"\nHighlight: \nEducation is poised for a transformation as AI enters the classroom, promising a future where learning is personalized, not generalized. This article delves into how AI can tailor educational experiences to individual learning styles, making education more effective and accessible. It\'s a glimpse into a future where AI is not just a tool for learning, but an active participant in shaping the educational journey of the next generation.'
== '1. "The Role of AI in Predictive Analysis"\nHighlight: AI is revolutionizing the way we understand and utilize data through predictive analysis. Complex algorithms can sift through vast amounts of information, predict future trends and assist businesses in making informed decisions. The article will delve into the intricate workings of AI in predictive analysis and how it is shaping industries from healthcare to finance.\n\nNotes: This topic will focus on the business aspect of AI and its transformative role in data analysis. Case studies from different industries can be used to illustrate the impact of AI in predictive analysis.\n\n2. "The Intersection of AI and Quantum Computing"\nHighlight: As we stand at the crossroads of AI and quantum computing, theres an unprecedented potential for breakthroughs in processing speed and problem-solving capabilities. This article will explore this exciting intersection, revealing how the fusion of these two technologies can push the boundaries of what\'s possible.\n\nNotes: The article will provide a detailed overview of quantum computing and how its integration with AI can revolutionize various sectors. Real-world applications and future predictions will be included.\n\n3. "AI for Sustainable Development"\nHighlight: In an era where sustainability is a global priority, AI is emerging as a powerful tool in progressing towards this goal. From optimizing resource use to monitoring environmental changes, AI\'s role in sustainable development is multifaceted and transformative. This article will shed light on how AI is being utilized to promote a more sustainable future.\n\nNotes: This topic will delve into the environmental aspect of AI and its potential in promoting sustainable development. Examples of AI applications in different environmental contexts will be provided.\n\n4. "Ethical Implications of AI"\nHighlight: As AI permeates our society, it brings along a host of ethical dilemmas. 
From privacy concerns to accountability, the ethical implications of AI are as complex as they are critical. This article will take a deep dive into the ethical landscape of AI, exploring the pressing issues and potential solutions.\n\nNotes: This topic will take a philosophical and ethical approach, discussing the moral implications of AI use and how they can be mitigated. It will include a wide range of perspectives from experts in the field.\n\n5. "AI in Art and Creativity"\nHighlight: The world of art is no stranger to the transformative power of AI. From creating original artworks to enhancing creative processes, AI is redefining the boundaries of art and creativity. This article will take you on a journey through the fascinating intersection of AI and creativity, showcasing the revolutionary impact of this technology in the art world.\n\nNotes: This article will explore the artistic side of AI, discussing how it\'s being used in various creative fields. It will feature interviews with artists and creators who are harnessing the power of AI in their work.'
)
@@ -152,7 +152,7 @@ def test_hierarchical_process():
assert (
crew.kickoff()
== """Here are the 5 unique and interesting ideas for articles along with a highlight paragraph for each:\n\n1) The Future of AI and Machine Learning: A deeper look into the future of AI and machine learning, revealing the potential of both and their implications on society. The article will provide an informed vision of the future, addressing the possibilities that AI and machine learning could bring to our daily lives, from healthcare to education, and the challenges we might face.\n\n2) Startups Revolutionizing Traditional Industries with Tech: This article will narrate the journey of game-changing startups that are transforming traditional industries with innovative technology. It will delve into their stories, exploring how they leverage technology to disrupt the status quo, the hurdles they've overcome, and the impact they're making.\n\n3) Personal Development in the Age of Technology: In this article, we will explore how technology has changed the landscape of personal development. We will cover how digital tools and platforms are empowering individuals to learn, grow, and achieve their goals faster than ever before.\n\n4) Ethical Issues in Software Engineering: This article will investigate the ethical dilemmas that are arising in the realm of software engineering. It will discuss the moral implications of new technologies, the responsibilities of software engineers, and the need for a robust code of ethics in this rapidly evolving field.\n\n5) Entrepreneurship in the Digital Era: In this piece, we will delve into the role of digital technology in shaping the entrepreneurial landscape. We will discuss how the digital era has given rise to new entrepreneurial opportunities, the challenges that come with it, and the skills required to thrive in this new era."""
== "Here are the five interesting ideas for our next article along with a captivating paragraph for each:\n\n1. 'AI and Climate Change: A New Hope for Sustainability':\nIn a world where climate change is a pressing concern, Artificial Intelligence (AI) offers a glimmer of hope. This article will delve into how AI's predictive capabilities and data analysis can aid in sustainability efforts, from optimizing energy consumption to predicting extreme weather patterns. Through real-world examples and expert insights, we'll explore the innovative solutions AI is bringing to the fight against climate change.\n\n2. 'AI in Art: How Neural Networks are Revolutionizing the Artistic Landscape':\nArtificial Intelligence is not just for the tech-savvy; it's making waves in the art world too. This article will unveil how AI and Neural Networks are transforming the artistic landscape, creating a new genre of AI-art. From AI that can replicate the style of famous artists to AI that creates entirely original pieces, we will delve into this fascinating intersection of technology and creativity.\n\n3. 'The Role of AI in the Post-Covid World':\nThe global pandemic has drastically altered our world, and AI has played a pivotal role in this transformation. In this article, we'll explore how AI has been instrumental in everything from predicting the virus's spread to accelerating vaccine development. We'll also look ahead to the post-Covid world, investigating the lasting changes that AI will bring about in our societies.\n\n4. 'Demystifying AI: Breaking Down Complex AI Concepts for the Everyday Reader':\nArtificial Intelligence can seem like a complex and intimidating subject, but it doesn't have to be. This article aims to demystify AI, breaking down complex concepts into understandable nuggets of information. Whether you're an AI novice or a tech enthusiast, this article will enrich your understanding of AI and its impact on our lives.\n\n5. 
'The Ethical Dilemmas of AI: Balancing Innovation and Humanity':\nAs AI continues to advance, it brings along a host of ethical dilemmas. This article will delve into the heart of these issues, discussing the balance between innovation and humanity. From the potential for bias in AI algorithms to the implications of autonomous machines, we'll explore the ethical implications of AI in our society."
)
@@ -556,3 +556,118 @@ def test_task_with_no_arguments():
result = crew.kickoff()
assert result == "The total number of sales from January to May is 75."
def test_delegation_is_not_enabled_if_there_are_only_one_agent():
    """A single-agent crew must not attach delegation tools to its tasks."""
    from unittest.mock import patch

    solo_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
        allow_delegation=True,
    )
    research_task = Task(
        description="Look at the available data nd give me a sense on the total number of sales.",
        agent=solo_agent,
    )
    crew = Crew(agents=[solo_agent], tasks=[research_task])

    # Stub out real execution; we only care about the tools wired onto the task.
    with patch.object(Task, "execute") as mocked_execute:
        mocked_execute.return_value = "ok"
        crew.kickoff()
        assert research_task.tools == []
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():
    """With a lone agent, kickoff succeeds and the agent keeps zero tools."""
    lone_agent = Agent(
        role="Researcher",
        goal="Be super empathetic.",
        backstory="You're love to sey howdy.",
        allow_delegation=False,
    )
    greeting = Task(description="say howdy", expected_output="Howdy!", agent=lone_agent)
    crew = Crew(agents=[lone_agent], tasks=[greeting])

    outcome = crew.kickoff()

    assert outcome == "Howdy!"
    # No delegation tools should have been injected for a one-agent crew.
    assert len(lone_agent.tools) == 0
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_usage_metrics_are_captured_for_sequential_process():
    """After a sequential run, crew.usage_metrics reflects the recorded LLM usage."""
    greeter = Agent(
        role="Researcher",
        goal="Be super empathetic.",
        backstory="You're love to sey howdy.",
        allow_delegation=False,
    )
    howdy_task = Task(description="say howdy", expected_output="Howdy!", agent=greeter)
    crew = Crew(agents=[greeter], tasks=[howdy_task])

    output = crew.kickoff()
    assert output == "Howdy!"

    # Values are pinned by the recorded VCR cassette for this test.
    expected_usage = {
        "completion_tokens": 8,
        "prompt_tokens": 103,
        "successful_requests": 1,
        "total_tokens": 111,
    }
    assert crew.usage_metrics == expected_usage
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_usage_metrics_are_captured_for_hierarchical_process():
    """Hierarchical runs (manager LLM included) also aggregate usage_metrics."""
    from langchain_openai import ChatOpenAI

    worker = Agent(
        role="Researcher",
        goal="Be super empathetic.",
        backstory="You're love to sey howdy.",
        allow_delegation=False,
    )
    # Task is unassigned; the hierarchical manager decides who executes it.
    unassigned_task = Task(description="say howdy", expected_output="Howdy!")
    crew = Crew(
        agents=[worker],
        tasks=[unassigned_task],
        process=Process.hierarchical,
        manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
    )

    output = crew.kickoff()
    assert output == "Howdy!"

    # Totals are pinned by the recorded VCR cassette (3 LLM requests in all).
    assert crew.usage_metrics == {
        "total_tokens": 1365,
        "prompt_tokens": 1256,
        "completion_tokens": 109,
        "successful_requests": 3,
    }
def test_crew_inputs_interpolate_both_agents_and_tasks():
    """Crew `inputs` placeholders are interpolated into agent and task fields."""
    templated_agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )
    templated_task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="{points} bullet points about {topic}.",
    )

    crew = Crew(
        agents=[templated_agent],
        tasks=[templated_task],
        inputs={"topic": "AI", "points": 5},
    )

    # Task fields get the substituted values...
    assert crew.tasks[0].description == "Give me an analysis around AI."
    assert crew.tasks[0].expected_output == "5 bullet points about AI."
    # ...and so do all three agent prompt fields.
    assert crew.agents[0].role == "AI Researcher"
    assert crew.agents[0].goal == "Express hot takes on AI."
    assert crew.agents[0].backstory == "You have a lot of experience with AI."

View File

@@ -6,7 +6,7 @@ import pytest
from pydantic import BaseModel
from pydantic_core import ValidationError
from crewai import Agent, Crew, Task
from crewai import Agent, Crew, Process, Task
def test_task_tool_reflect_agent_tools():
@@ -235,7 +235,7 @@ def test_output_pydantic_to_another_task():
crew = Crew(agents=[scorer], tasks=[task1, task2], verbose=2)
result = crew.kickoff()
assert 5 == result.score
assert 4 == result.score
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -347,3 +347,103 @@ def test_save_task_pydantic_output():
save_file.return_value = None
crew.kickoff()
save_file.assert_called_once_with('{"score":4}')
@pytest.mark.vcr(filter_headers=["authorization"])
def test_increment_delegations_for_hierarchical_process():
    """The hierarchical manager delegating work must bump the task's delegation counter.

    Bug fix: `increment_delegations.assert_called_once` was a bare attribute
    access on the Mock, which never executes the assertion — the test passed
    unconditionally. It must be called: `assert_called_once()`.
    """
    from langchain_openai import ChatOpenAI

    scorer = Agent(
        role="Scorer",
        goal="Score the title",
        backstory="You're an expert scorer, specialized in scoring titles.",
        allow_delegation=False,
    )
    task = Task(
        description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
        expected_output="The score of the title.",
    )
    crew = Crew(
        agents=[scorer],
        tasks=[task],
        process=Process.hierarchical,
        manager_llm=ChatOpenAI(model="gpt-4-0125-preview"),
    )

    with patch.object(Task, "increment_delegations") as increment_delegations:
        increment_delegations.return_value = None
        crew.kickoff()
        # Must be a call, not an attribute access, to actually assert.
        increment_delegations.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_increment_delegations_for_sequential_process():
    """A sequential-process delegation must bump the task's delegation counter.

    Bug fixes: removed a stray dead `pass` statement at the top of the
    function, and `increment_delegations.assert_called_once` was a bare
    attribute access on the Mock that never ran the assertion — it must be
    called: `assert_called_once()`.
    """
    manager = Agent(
        role="Manager",
        goal="Coordinate scoring processes",
        backstory="You're great at delegating work about scoring.",
        allow_delegation=False,
    )
    scorer = Agent(
        role="Scorer",
        goal="Score the title",
        backstory="You're an expert scorer, specialized in scoring titles.",
        allow_delegation=False,
    )
    task = Task(
        description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
        expected_output="The score of the title.",
        agent=manager,
    )
    crew = Crew(
        agents=[manager, scorer],
        tasks=[task],
        process=Process.sequential,
    )

    with patch.object(Task, "increment_delegations") as increment_delegations:
        increment_delegations.return_value = None
        crew.kickoff()
        # Must be a call, not an attribute access, to actually assert.
        increment_delegations.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_increment_tool_errors():
    """A tool that raises must bump the task's tools-errors counter.

    Bug fix: `increment_tools_errors.assert_called_once` was a bare attribute
    access on the Mock, which never executes the assertion — the test passed
    unconditionally. It must be called: `assert_called_once()`.
    """
    from crewai_tools import tool
    from langchain_openai import ChatOpenAI

    @tool
    def scoring_examples() -> None:
        "Useful examples for scoring titles."
        # Deliberately raise so the error-counting path is exercised.
        raise Exception("Error")

    scorer = Agent(
        role="Scorer",
        goal="Score the title",
        backstory="You're an expert scorer, specialized in scoring titles.",
        tools=[scoring_examples],
    )
    task = Task(
        description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
        expected_output="The score of the title.",
    )
    crew = Crew(
        agents=[scorer],
        tasks=[task],
        process=Process.hierarchical,
        manager_llm=ChatOpenAI(model="gpt-4-0125-preview"),
    )

    with patch.object(Task, "increment_tools_errors") as increment_tools_errors:
        increment_tools_errors.return_value = None
        crew.kickoff()
        # Must be a call, not an attribute access, to actually assert.
        increment_tools_errors.assert_called_once()