Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-16 12:28:30 +00:00)
Compare commits
32 Commits
devin/1765... ... lj/conditi...
| Author | SHA1 | Date |
|---|---|---|
|  | 0cc37e0d72 |  |
|  | bb33e1813d |  |
|  | 96dc96d13c |  |
|  | 6efbe8c5a5 |  |
|  | a3bdc09f2d |  |
|  | bae9c70730 |  |
|  | 55af7e0f15 |  |
|  | 4e8f69a7b0 |  |
|  | e745094d73 |  |
|  | 60d0f56e2d |  |
|  | 053d8a0449 |  |
|  | 0bfa549477 |  |
|  | 5334e9e585 |  |
|  | 68de393534 |  |
|  | f36f73e035 |  |
|  | 1f9166f61b |  |
|  | 5a5276eb5d |  |
|  | 60c8f86345 |  |
|  | 6a47eb4f9e |  |
|  | 2efe16eac9 |  |
|  | 1d2827e9a5 |  |
|  | 5091712a2d |  |
|  | 764234c426 |  |
|  | be0a4c2fe5 |  |
|  | cc1c97e87d |  |
|  | 5775ed3fcb |  |
|  | 5f820cedcc |  |
|  | f86e4a1990 |  |
|  | ee4a996de3 |  |
|  | 5c504f4087 |  |
|  | 26489ced1a |  |
|  | ea5a784877 |  |
3  .gitignore  (vendored)
@@ -13,4 +13,5 @@ db/
 test.py
 rc-tests/*
 *.pkl
 temp/*
+.vscode/*
@@ -7,16 +7,15 @@ from langchain.tools.render import render_text_description
 from langchain_core.agents import AgentAction
 from langchain_core.callbacks import BaseCallbackHandler
 from langchain_openai import ChatOpenAI

 from pydantic import Field, InstanceOf, model_validator

 from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser
+from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.tools.agent_tools import AgentTools
-from crewai.utilities import Prompts, Converter
+from crewai.utilities import Converter, Prompts
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
 from crewai.utilities.token_counter_callback import TokenCalcHandler
-from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.utilities.training_handler import CrewTrainingHandler

 agentops = None
@@ -91,7 +90,6 @@ class Agent(BaseAgent):
     response_template: Optional[str] = Field(
         default=None, description="Response format for the agent."
     )
-
     allow_code_execution: Optional[bool] = Field(
         default=False, description="Enable code execution for the agent."
     )
@@ -103,7 +101,7 @@ class Agent(BaseAgent):

     @model_validator(mode="after")
     def set_agent_executor(self) -> "Agent":
-        """Ensure agent executor and token process is set."""
+        """Ensure agent executor and token process are set."""
         if hasattr(self.llm, "model_name"):
             token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)
@@ -1,23 +1,26 @@
-from copy import deepcopy
 import uuid
-from typing import Any, Dict, List, Optional
 from abc import ABC, abstractmethod
+from copy import copy as shallow_copy
+from typing import Any, Dict, List, Optional, TypeVar

 from pydantic import (
     UUID4,
     BaseModel,
+    ConfigDict,
     Field,
     InstanceOf,
+    PrivateAttr,
     field_validator,
     model_validator,
-    ConfigDict,
-    PrivateAttr,
 )
 from pydantic_core import PydanticCustomError

-from crewai.utilities import I18N, RPMController, Logger
 from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
 from crewai.agents.cache.cache_handler import CacheHandler
 from crewai.agents.tools_handler import ToolsHandler
-from crewai.utilities.token_counter_callback import TokenProcess
+from crewai.utilities import I18N, Logger, RPMController
+
+T = TypeVar("T", bound="BaseAgent")


 class BaseAgent(ABC, BaseModel):
@@ -188,6 +191,31 @@ class BaseAgent(ABC, BaseModel):
         """Get the converter class for the agent to create json/pydantic outputs."""
         pass

+    def copy(self: T) -> T:
+        """Create a deep copy of the Agent."""
+        exclude = {
+            "id",
+            "_logger",
+            "_rpm_controller",
+            "_request_within_rpm_limit",
+            "_token_process",
+            "agent_executor",
+            "tools",
+            "tools_handler",
+            "cache_handler",
+            "llm",
+        }
+
+        # Copy llm and clear callbacks
+        existing_llm = shallow_copy(self.llm)
+        existing_llm.callbacks = []
+        copied_data = self.model_dump(exclude=exclude)
+        copied_data = {k: v for k, v in copied_data.items() if v is not None}
+
+        copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
+
+        return copied_agent
+
     def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
         """Interpolate inputs into the agent description and backstory."""
         if self._original_role is None:
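A minimal standalone sketch of the copy pattern added above. FakeLLM and FakeAgent are illustrative stand-ins (the real llm comes from langchain); the point is that the llm is shallow-copied and its callbacks cleared so a clone never shares token-counting handlers with the original agent:

from copy import copy as shallow_copy

class FakeLLM:
    """Toy stand-in for a langchain chat model (assumption for illustration)."""
    def __init__(self):
        self.callbacks = []

class FakeAgent:
    def __init__(self, llm):
        self.llm = llm

    def copy(self):
        # Shallow-copy the llm and drop its callbacks so the clone does not
        # share token-counting handlers with the original agent.
        existing_llm = shallow_copy(self.llm)
        existing_llm.callbacks = []
        return FakeAgent(llm=existing_llm)

original = FakeAgent(FakeLLM())
original.llm.callbacks.append("TokenCalcHandler")
clone = original.copy()
assert clone.llm.callbacks == []
assert original.llm.callbacks == ["TokenCalcHandler"]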
@@ -217,35 +245,6 @@ class BaseAgent(ABC, BaseModel):
     def increment_formatting_errors(self) -> None:
         self.formatting_errors += 1

-    def copy(self):
-        exclude = {
-            "id",
-            "_logger",
-            "_rpm_controller",
-            "_request_within_rpm_limit",
-            "token_process",
-            "agent_executor",
-            "tools",
-            "tools_handler",
-            "cache_handler",
-            "crew",
-            "llm",
-        }
-
-        copied_data = self.model_dump(exclude=exclude, exclude_unset=True)
-        copied_agent = self.__class__(**copied_data)
-
-        # Copy mutable attributes separately
-        copied_agent.tools = deepcopy(self.tools)
-        copied_agent.config = deepcopy(self.config)
-
-        # Preserve original values for interpolation
-        copied_agent._original_role = self._original_role
-        copied_agent._original_goal = self._original_goal
-        copied_agent._original_backstory = self._original_backstory
-
-        return copied_agent
-
     def set_rpm_controller(self, rpm_controller: RPMController) -> None:
         """Set the rpm controller for the agent.
37  src/crewai/conditional_task.py  Normal file
@@ -0,0 +1,37 @@
+from typing import Callable, Optional, Any
+
+from pydantic import BaseModel
+
+from crewai.task import Task
+
+
+class ConditionalTask(Task):
+    """
+    A task that can be conditionally executed based on the output of another task.
+    Note: This cannot be the only task in your crew and cannot be the first one, since it needs context from a previous task.
+    """
+
+    condition: Optional[Callable[[BaseModel], bool]] = None
+
+    def __init__(
+        self,
+        *args,
+        condition: Optional[Callable[[BaseModel], bool]] = None,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+        self.condition = condition
+
+    def should_execute(self, context: Any) -> bool:
+        """
+        Determines whether the conditional task should be executed based on the provided context.
+
+        Args:
+            context (Any): The context or output from the previous task that will be evaluated by the condition.
+
+        Returns:
+            bool: True if the task should be executed, False otherwise.
+        """
+        if self.condition:
+            return self.condition(context)
+        return True
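A hedged usage sketch of the new class: the condition callable and should_execute come from the file above, while description and expected_output are inherited from the base Task. The predicate here is hypothetical:

from crewai.conditional_task import ConditionalTask

# Hypothetical predicate: only run the follow-up when the previous task
# produced structured (non-string) output.
followup = ConditionalTask(
    description="Summarize the structured findings.",
    expected_output="A short summary.",
    condition=lambda previous_result: not isinstance(previous_result, str),
)

assert followup.should_execute({"findings": 3}) is True
assert followup.should_execute("raw text only") is False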
@@ -1,7 +1,8 @@
 import asyncio
 import json
 import uuid
-from typing import Any, Dict, List, Optional, Union
+from concurrent.futures import Future
+from typing import Any, Dict, List, Optional, Tuple, Union

 from langchain_core.callbacks import BaseCallbackHandler
 from pydantic import (
@@ -20,15 +21,19 @@ from pydantic_core import PydanticCustomError

 from crewai.agent import Agent
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache import CacheHandler
+from crewai.conditional_task import ConditionalTask
+from crewai.crews.crew_output import CrewOutput
 from crewai.memory.entity.entity_memory import EntityMemory
 from crewai.memory.long_term.long_term_memory import LongTermMemory
 from crewai.memory.short_term.short_term_memory import ShortTermMemory
 from crewai.process import Process
 from crewai.task import Task
+from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
 from crewai.tools.agent_tools import AgentTools
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
+from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs
 from crewai.utilities.training_handler import CrewTrainingHandler

 try:
|
||||
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
|
||||
prompt_file: Path to the prompt json file to be used for the crew.
|
||||
id: A unique identifier for the crew instance.
|
||||
full_output: Whether the crew should return the full output with all tasks outputs and token usage metrics or just the final output.
|
||||
task_callback: Callback to be executed after each task for every agents execution.
|
||||
step_callback: Callback to be executed after each step for every agents execution.
|
||||
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
|
||||
@@ -92,10 +96,6 @@ class Crew(BaseModel):
         default=None,
         description="Metrics for the LLM usage during all tasks execution.",
     )
-    full_output: Optional[bool] = Field(
-        default=False,
-        description="Whether the crew should return the full output with all tasks outputs and token usage metrics or just the final output.",
-    )
     manager_llm: Optional[Any] = Field(
         description="Language model that will run the agent.", default=None
     )
@@ -224,6 +224,17 @@ class Crew(BaseModel):
                 agent.set_rpm_controller(self._rpm_controller)
         return self

+    @model_validator(mode="after")
+    def validate_first_task(self) -> "Crew":
+        """Ensure the first task is not a ConditionalTask."""
+        if self.tasks and isinstance(self.tasks[0], ConditionalTask):
+            raise PydanticCustomError(
+                "invalid_first_task",
+                "The first task cannot be a ConditionalTask.",
+                {},
+            )
+        return self
+
     def _setup_from_config(self):
         assert self.config is not None, "Config should not be None."
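A quick sketch of what the new validator rejects. Agent fields are abbreviated and the constructors are assumed from crewAI's public API:

from crewai import Agent, Crew
from crewai.conditional_task import ConditionalTask

agent = Agent(role="Researcher", goal="Research.", backstory="Curious.")
first = ConditionalTask(
    description="Needs prior context.",
    expected_output="n/a",
    agent=agent,
    condition=lambda output: True,
)

# Raises a pydantic "invalid_first_task" error: a ConditionalTask has no
# previous output to evaluate, so it cannot open the task list.
Crew(agents=[agent], tasks=[first])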
@@ -283,11 +294,12 @@ class Crew(BaseModel):

     def kickoff(
         self,
-        inputs: Optional[Dict[str, Any]] = {},
-    ) -> Union[str, Dict[str, Any]]:
+        inputs: Optional[Dict[str, Any]] = None,
+    ) -> CrewOutput:
         """Starts the crew to work on its assigned tasks."""
         self._execution_span = self._telemetry.crew_execution_span(self, inputs)
-        # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
-        self._interpolate_inputs(inputs)
+        if inputs is not None:
+            self._interpolate_inputs(inputs)
         self._set_tasks_callbacks()
|
||||
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
|
||||
agent.crew = self # type: ignore[attr-defined]
|
||||
# TODO: Create an AgentFunctionCalling protocol for future refactoring
|
||||
if (
|
||||
hasattr(agent, "function_calling_llm")
|
||||
and not agent.function_calling_llm
|
||||
):
|
||||
if not agent.function_calling_llm:
|
||||
agent.function_calling_llm = self.function_calling_llm
|
||||
|
||||
if hasattr(agent, "allow_code_execution") and agent.allow_code_execution:
|
||||
if agent.allow_code_execution:
|
||||
agent.tools += agent.get_code_execution_tools()
|
||||
|
||||
if hasattr(agent, "step_callback") and not agent.step_callback:
|
||||
if not agent.step_callback:
|
||||
agent.step_callback = self.step_callback
|
||||
|
||||
agent.create_agent_executor()
|
||||
@@ -326,9 +335,7 @@ class Crew(BaseModel):
             raise NotImplementedError(
                 f"The process '{self.process}' is not implemented yet."
             )
-        metrics = metrics + [
-            agent._token_process.get_summary() for agent in self.agents
-        ]
+        metrics += [agent._token_process.get_summary() for agent in self.agents]

         self.usage_metrics = {
             key: sum([m[key] for m in metrics if m is not None]) for key in metrics[0]
@@ -336,52 +343,93 @@ class Crew(BaseModel):

         return result

-    def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List:
+    def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
         """Executes the Crew's workflow for each input in the list and aggregates results."""
-        results = []
+        results: List[CrewOutput] = []
+
+        # Initialize the parent crew's usage metrics
+        total_usage_metrics = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }

         for input_data in inputs:
             crew = self.copy()

-            for task in crew.tasks:
-                task.interpolate_inputs(input_data)
-            for agent in crew.agents:
-                agent.interpolate_inputs(input_data)
+            output = crew.kickoff(inputs=input_data)
+
+            if crew.usage_metrics:
+                for key in total_usage_metrics:
+                    total_usage_metrics[key] += crew.usage_metrics.get(key, 0)

-            output = crew.kickoff()
             results.append(output)

+        self.usage_metrics = total_usage_metrics
         return results

     async def kickoff_async(
-        self, inputs: Optional[Dict[str, Any]] = {}
+        self, inputs: Optional[CrewOutput] = {}
     ) -> Union[str, Dict]:
         """Asynchronous kickoff method to start the crew execution."""
         return await asyncio.to_thread(self.kickoff, inputs)

-    async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[Any]:
-        async def run_crew(input_data):
-            crew = self.copy()
-
-            for task in crew.tasks:
-                task.interpolate_inputs(input_data)
-            for agent in crew.agents:
-                agent.interpolate_inputs(input_data)
-
-            return await crew.kickoff_async()
+    async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
+        crew_copies = [self.copy() for _ in inputs]
+
+        async def run_crew(crew, input_data):
+            return await crew.kickoff_async(inputs=input_data)

-        tasks = [asyncio.create_task(run_crew(input_data)) for input_data in inputs]
+        tasks = [
+            asyncio.create_task(run_crew(crew_copies[i], inputs[i]))
+            for i in range(len(inputs))
+        ]

         results = await asyncio.gather(*tasks)

+        total_usage_metrics = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }
+        for crew in crew_copies:
+            if crew.usage_metrics:
+                for key in total_usage_metrics:
+                    total_usage_metrics[key] += crew.usage_metrics.get(key, 0)
+
+        self.usage_metrics = total_usage_metrics
+
         return results

-    def _run_sequential_process(self) -> str:
+    def _run_sequential_process(self) -> CrewOutput:
         """Executes tasks sequentially and returns the final output."""
-        task_output = ""
-        token_usage = []
+        task_outputs: List[TaskOutput] = []
+        futures: List[Tuple[Task, Future[TaskOutput]]] = []

         for task in self.tasks:
-            if task.agent.allow_delegation:  # type: ignore # Item "None" of "Agent | None" has no attribute "allow_delegation"
+            if isinstance(task, ConditionalTask):
+                if futures:
+                    task_outputs = []
+                    for future_task, future in futures:
+                        task_output = future.result()
+                        task_outputs.append(task_output)
+                        self._process_task_result(future_task, task_output)
+                    futures.clear()
+
+                previous_output = task_outputs[-1] if task_outputs else None
+                if previous_output is not None and not task.should_execute(
+                    previous_output.result()
+                ):
+                    self._logger.log(
+                        "info",
+                        f"Skipping conditional task: {task.description}",
+                        color="yellow",
+                    )
+                    continue
+
+            if task.agent and task.agent.allow_delegation:
                 agents_for_delegation = [
                     agent for agent in self.agents if agent != task.agent
                 ]
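The async variant now copies the crew once per input and gathers the results. A hedged sketch, reusing the crew built in the kickoff example above:

import asyncio

async def main():
    results = await crew.kickoff_for_each_async(
        inputs=[{"topic": "dogs"}, {"topic": "apples"}]
    )
    for output in results:       # one CrewOutput per input, in order
        print(output.raw_output())
    print(crew.usage_metrics)    # usage aggregated across all crew copies

asyncio.run(main())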
@@ -398,29 +446,61 @@ class Crew(BaseModel):
                 self._file_handler.log(
                     agent=role, task=task.description, status="started"
                 )
-            output = task.execute(context=task_output)
-
-            if not task.async_execution:
-                task_output = output
+            if task.async_execution:
+                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
+                future = task.execute_async(
+                    agent=task.agent, context=context, tools=task.tools
+                )
+                futures.append((task, future))
+            else:
+                # Before executing a synchronous task, wait for all async tasks to complete
+                if futures:
+                    # Clear task_outputs before processing async tasks
+                    task_outputs = []
+                    for future_task, future in futures:
+                        task_output = future.result()
+                        task_outputs.append(task_output)
+                        self._process_task_result(future_task, task_output)

-            role = task.agent.role if task.agent is not None else "None"
-            self._logger.log("debug", f"== [{role}] Task output: {task_output}\n\n")
-            token_summ = task.agent._token_process.get_summary()
+                    # Clear the futures list after processing all async results
+                    futures.clear()

-            token_usage.append(token_summ)
+                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
+                task_output = task.execute_sync(
+                    agent=task.agent, context=context, tools=task.tools
+                )
+                task_outputs.append(task_output)
+                self._process_task_result(task, task_output)

-            if self.output_log_file:
-                self._file_handler.log(agent=role, task=task_output, status="completed")
+        if futures:
+            # Clear task_outputs before processing async tasks
+            task_outputs = []
+            for future_task, future in futures:
+                task_output = future.result()
+                task_outputs.append(task_output)
+                self._process_task_result(future_task, task_output)

-        token_usage_formatted = self.aggregate_token_usage(token_usage)
-        self._finish_execution(task_output)
+        final_string_output = aggregate_raw_outputs_from_task_outputs(task_outputs)
+        self._finish_execution(final_string_output)
+
+        # TODO: need to revert
+        # token_usage = self.calculate_usage_metrics()
+        token_usage = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }
+
+        return self._format_output(task_outputs, token_usage)

-        # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
-        return self._format_output(task_output, token_usage_formatted)
+    def _process_task_result(self, task: Task, output: TaskOutput) -> None:
+        role = task.agent.role if task.agent is not None else "None"
+        self._logger.log("debug", f"== [{role}] Task output: {output}\n\n")
+        if self.output_log_file:
+            self._file_handler.log(agent=role, task=output, status="completed")

-    def _run_hierarchical_process(self) -> Union[str, Dict[str, Any]]:
+    def _run_hierarchical_process(self) -> Tuple[CrewOutput, Dict[str, Any]]:
         """Creates and assigns a manager agent to make sure the crew completes the tasks."""

         i18n = I18N(prompt_file=self.prompt_file)
         if self.manager_agent is not None:
             self.manager_agent.allow_delegation = True
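The loop above interleaves async and sync tasks by draining pending futures before each synchronous step. A self-contained sketch of that pattern, with run_async mirroring execute_async and plain functions standing in for tasks:

import threading
from concurrent.futures import Future

def run_async(fn, *args) -> Future:
    """Run fn in a daemon thread and expose its result as a Future."""
    future: Future = Future()
    threading.Thread(target=lambda: future.set_result(fn(*args)), daemon=True).start()
    return future

futures = [run_async(pow, 2, 8), run_async(pow, 3, 3)]

# Before a synchronous step, wait for all async work and collect outputs,
# exactly like the `if futures:` blocks above.
outputs = [future.result() for future in futures]
futures.clear()
assert outputs == [256, 27]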
@@ -437,9 +517,11 @@ class Crew(BaseModel):
                 llm=self.manager_llm,
                 verbose=self.verbose,
             )
+            self.manager_agent = manager
+
+        task_outputs: List[TaskOutput] = []
+        futures: List[Tuple[Task, Future[TaskOutput]]] = []

-        task_output = ""
-        token_usage = []
         for task in self.tasks:
             self._logger.log("debug", f"Working Agent: {manager.role}")
             self._logger.log("info", f"Starting Task: {task.description}")
@@ -449,29 +531,50 @@ class Crew(BaseModel):
                     agent=manager.role, task=task.description, status="started"
                 )

-            task_output = task.execute(
-                agent=manager, context=task_output, tools=manager.tools
-            )
-
-            self._logger.log("debug", f"[{manager.role}] Task output: {task_output}")
-            if hasattr(task, "agent._token_process"):
-                token_summ = task.agent._token_process.get_summary()
-                token_usage.append(token_summ)
-            if self.output_log_file:
-                self._file_handler.log(
-                    agent=manager.role, task=task_output, status="completed"
-                )
+            if task.async_execution:
+                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
+                future = task.execute_async(
+                    agent=manager, context=context, tools=manager.tools
+                )
+                futures.append((task, future))
+            else:
+                # Before executing a synchronous task, wait for all async tasks to complete
+                if futures:
+                    # Clear task_outputs before processing async tasks
+                    task_outputs = []
+                    for future_task, future in futures:
+                        task_output = future.result()
+                        task_outputs.append(task_output)
+                        self._process_task_result(future_task, task_output)

-        self._finish_execution(task_output)
+                    # Clear the futures list after processing all async results
+                    futures.clear()

-        # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
-        manager_token_usage = manager._token_process.get_summary()
-        token_usage.append(manager_token_usage)
-        token_usage_formatted = self.aggregate_token_usage(token_usage)
+                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
+                task_output = task.execute_sync(
+                    agent=manager, context=context, tools=manager.tools
+                )
+                task_outputs = [task_output]
+                self._process_task_result(task, task_output)

-        return self._format_output(
-            task_output, token_usage_formatted
-        ), manager_token_usage
+        # Process any remaining async results
+        if futures:
+            # Clear task_outputs before processing async tasks
+            task_outputs = []
+            for future_task, future in futures:
+                task_output = future.result()
+                task_outputs.append(task_output)
+                self._process_task_result(future_task, task_output)
+
+        final_string_output = aggregate_raw_outputs_from_task_outputs(task_outputs)
+        self._finish_execution(final_string_output)
+
+        token_usage = self.calculate_usage_metrics()
+
+        return (
+            self._format_output(task_outputs, token_usage),
+            token_usage,
+        )

     def copy(self):
         """Create a deep copy of the Crew."""
@@ -486,12 +589,13 @@ class Crew(BaseModel):
             "_short_term_memory",
             "_long_term_memory",
             "_entity_memory",
+            "_telemetry",
             "agents",
             "tasks",
         }

         cloned_agents = [agent.copy() for agent in self.agents]
-        cloned_tasks = [task.copy() for task in self.tasks]
+        cloned_tasks = [task.copy(cloned_agents) for task in self.tasks]

         copied_data = self.model_dump(exclude=exclude)
         copied_data = {k: v for k, v in copied_data.items() if v is not None}
@@ -523,35 +627,57 @@ class Crew(BaseModel):
             agent.interpolate_inputs(inputs)

     def _format_output(
-        self, output: str, token_usage: Optional[Dict[str, Any]] = None
-    ) -> Union[str, Dict[str, Any]]:
+        self, output: List[TaskOutput], token_usage: Optional[Dict[str, Any]]
+    ) -> CrewOutput:
         """
         Formats the output of the crew execution.
-        If full_output is True, then returned data type will be a dictionary else returned outputs are string
         """
-        if self.full_output:
-            return {  # type: ignore # Incompatible return value type (got "dict[str, Sequence[str | TaskOutput | None]]", expected "str")
-                "final_output": output,
-                "tasks_outputs": [task.output for task in self.tasks if task],
-                "usage_metrics": token_usage,
-            }
-        else:
-            return output
+        # breakpoint()
+        task_output = []
+        for task in self.tasks:
+            if task.output:
+                # print("task.output", task.output)
+                task_output.append(task.output.result())
+        return CrewOutput(
+            output=output,
+            # tasks_output=[task.output for task in self.tasks if task],
+            tasks_output=task_output,
+            token_usage=token_usage,
+        )

-    def _finish_execution(self, output) -> None:
+    def _finish_execution(self, final_string_output: str) -> None:
         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()
         if agentops:
             agentops.end_session(
-                end_state="Success", end_state_reason="Finished Execution", is_auto_end=True
+                end_state="Success",
+                end_state_reason="Finished Execution",
+                is_auto_end=True,
             )
-        self._telemetry.end_crew(self, output)
+        self._telemetry.end_crew(self, final_string_output)

+    def calculate_usage_metrics(self) -> Dict[str, int]:
+        """Calculates and returns the usage metrics."""
+        total_usage_metrics = {
+            "total_tokens": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "successful_requests": 0,
+        }
+
+        for agent in self.agents:
+            if hasattr(agent, "_token_process"):
+                token_sum = agent._token_process.get_summary()
+                for key in total_usage_metrics:
+                    total_usage_metrics[key] += token_sum.get(key, 0)
+
+        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
+            token_sum = self.manager_agent._token_process.get_summary()
+            for key in total_usage_metrics:
+                total_usage_metrics[key] += token_sum.get(key, 0)
+
+        return total_usage_metrics
+
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
+
+    def aggregate_token_usage(self, token_usage_list: List[Dict[str, Any]]):
+        return {
+            key: sum([m[key] for m in token_usage_list if m is not None])
+            for key in token_usage_list[0]
+        }
1  src/crewai/crews/__init__.py  Normal file
@@ -0,0 +1 @@
+from .crew_output import CrewOutput
48  src/crewai/crews/crew_output.py  Normal file
@@ -0,0 +1,48 @@
+from typing import Any, Dict, List, Union
+
+from pydantic import BaseModel, Field
+
+from crewai.tasks.task_output import TaskOutput
+from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs
+
+
+class CrewOutput(BaseModel):
+    output: List[TaskOutput] = Field(description="Result of the final task")
+    # NOTE HERE
+    # tasks_output: list[TaskOutput] = Field(
+    #     description="Output of each task", default=[]
+    # )
+    tasks_output: list[Union[str, BaseModel, Dict[str, Any]]] = Field(
+        description="Output of each task", default=[]
+    )
+    token_usage: Dict[str, Any] = Field(
+        description="Processed token summary", default={}
+    )
+
+    # TODO: Ask @joao what is the desired behavior here
+    def result(
+        self,
+    ) -> List[str | BaseModel | Dict[str, Any]]:
+        """Return the result of the task based on the available output."""
+        results = [output.result() for output in self.output]
+        return results
+
+    def raw_output(self) -> str:
+        """Return the raw output of the task."""
+        return aggregate_raw_outputs_from_task_outputs(self.output)
+
+    def to_output_dict(self) -> List[Dict[str, Any]]:
+        output_dict = [output.to_output_dict() for output in self.output]
+        return output_dict
+
+    def __getitem__(self, key: str) -> Any:
+        if len(self.output) == 0:
+            return None
+        elif len(self.output) == 1:
+            return self.output[0][key]
+        else:
+            return [output[key] for output in self.output]
+
+    # TODO: Confirm with Joao that we want to print the raw output and not the object
+    def __str__(self):
+        return str(self.raw_output())
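How the new container is meant to be read, continuing the kickoff sketch above. The "title" key is hypothetical and assumes a task that declared output_json or output_pydantic with such a field:

final = result.result()         # list of pydantic/json/raw results, one per output
text = result.raw_output()      # raw outputs joined with divider lines
rows = result.to_output_dict()  # list of dicts merged from json/pydantic outputs
title = result["title"]         # __getitem__ forwards key lookups to task outputs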
@@ -2,8 +2,9 @@ import os
 import re
 import threading
 import uuid
-from copy import deepcopy
-from typing import Any, Dict, List, Optional, Type
+from concurrent.futures import Future
+from copy import copy
+from typing import Any, Dict, List, Optional, Type, Union

 from langchain_openai import ChatOpenAI
 from opentelemetry.trace import Span
@@ -13,7 +14,9 @@ from pydantic_core import PydanticCustomError

 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry.telemetry import Telemetry
-from crewai.utilities import I18N, ConverterError, Printer
+from crewai.utilities.converter import Converter, ConverterError
+from crewai.utilities.i18n import I18N
+from crewai.utilities.printer import Printer
 from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -155,30 +158,46 @@ class Task(BaseModel):
             )
         return self

-    def wait_for_completion(self) -> str | BaseModel:
-        """Wait for asynchronous task completion and return the output."""
-        assert self.async_execution, "Task is not set to be executed asynchronously."
-
-        if self._thread:
-            self._thread.join()
-            self._thread = None
-
-        assert self.output, "Task output is not set."
-
-        return self.output.exported_output
-
-    def execute(  # type: ignore # Missing return statement
+    def execute_sync(
         self,
-        agent: BaseAgent | None = None,
+        agent: Optional[BaseAgent] = None,
         context: Optional[str] = None,
         tools: Optional[List[Any]] = None,
-    ) -> str:
-        """Execute the task.
+    ) -> TaskOutput:
+        """Execute the task synchronously."""
+        return self._execute_core(agent, context, tools)

-        Returns:
-            Output of the task.
-        """
+    def execute_async(
+        self,
+        agent: BaseAgent | None = None,
+        context: Optional[str] = None,
+        tools: Optional[List[Any]] = None,
+    ) -> Future[TaskOutput]:
+        """Execute the task asynchronously."""
+        future = Future()
+        threading.Thread(
+            target=self._execute_task_async, args=(agent, context, tools, future)
+        ).start()
+        return future
+
+    def _execute_task_async(
+        self,
+        agent: Optional[BaseAgent],
+        context: Optional[str],
+        tools: Optional[List[Any]],
+        future: Future[TaskOutput],
+    ) -> None:
+        """Execute the task asynchronously with context handling."""
+        result = self._execute_core(agent, context, tools)
+        future.set_result(result)
+
+    def _execute_core(
+        self,
+        agent: Optional[BaseAgent],
+        context: Optional[str],
+        tools: Optional[List[Any]],
+    ) -> TaskOutput:
+        """Run the core execution logic of the task."""
         self._execution_span = self._telemetry.task_started(self)

         agent = agent or self.agent
@@ -188,49 +207,32 @@ class Task(BaseModel):
             )

         if self.context:
-            # type: ignore # Incompatible types in assignment (expression has type "list[Never]", variable has type "str | None")
-            context = []
+            context_list = []
             for task in self.context:
-                if task.async_execution:
-                    task.wait_for_completion()
-                if task.output:
-                    # type: ignore # Item "str" of "str | None" has no attribute "append"
-                    context.append(task.output.raw_output)
-            # type: ignore # Argument 1 to "join" of "str" has incompatible type "str | None"; expected "Iterable[str]"
-            context = "\n".join(context)
+                if task.async_execution and task._thread:
+                    task._thread.join()
+                if task and task.output:
+                    context_list.append(task.output.raw_output)
+            context = "\n".join(context_list)

         self.prompt_context = context
         tools = tools or self.tools

-        if self.async_execution:
-            self._thread = threading.Thread(
-                target=self._execute, args=(agent, self, context, tools)
-            )
-            self._thread.start()
-        else:
-            result = self._execute(
-                task=self,
-                agent=agent,
-                context=context,
-                tools=tools,
-            )
-            return result
-
-    def _execute(self, agent, task, context, tools):
         result = agent.execute_task(
-            task=task,
+            task=self,
             context=context,
             tools=tools,
         )
+
         exported_output = self._export_output(result)

-        # type: ignore # the responses are usually str but need to figure out a more elegant solution here
-        self.output = TaskOutput(
+        task_output = TaskOutput(
             description=self.description,
-            exported_output=exported_output,
             raw_output=result,
+            pydantic_output=exported_output["pydantic"],
+            json_output=exported_output["json"],
             agent=agent.role,
         )
+        self.output = task_output

         if self.callback:
             self.callback(self.output)
@@ -239,7 +241,7 @@ class Task(BaseModel):
             self._telemetry.task_ended(self._execution_span, self)
             self._execution_span = None

-        return exported_output
+        return task_output

     def prompt(self) -> str:
         """Prompt the task.
@@ -274,7 +276,7 @@ class Task(BaseModel):
         """Increment the delegations counter."""
         self.delegations += 1

-    def copy(self):
+    def copy(self, agents: Optional[List["BaseAgent"]] = None) -> "Task":
         """Create a deep copy of the Task."""
         exclude = {
             "id",
@@ -289,8 +291,12 @@ class Task(BaseModel):
         cloned_context = (
             [task.copy() for task in self.context] if self.context else None
         )
-        cloned_agent = self.agent.copy() if self.agent else None
-        cloned_tools = deepcopy(self.tools) if self.tools else []
+
+        def get_agent_by_role(role: str) -> Union["BaseAgent", None]:
+            return next((agent for agent in agents if agent.role == role), None)
+
+        cloned_agent = get_agent_by_role(self.agent.role) if self.agent else None
+        cloned_tools = copy(self.tools) if self.tools else []

         copied_task = Task(
             **copied_data,
@@ -298,69 +304,95 @@ class Task(BaseModel):
             agent=cloned_agent,
             tools=cloned_tools,
         )

         return copied_task

-    def _export_output(self, result: str) -> Any:
-        exported_result = result
-        instructions = "I'm gonna convert this raw text into valid JSON."
+    def _export_output(
+        self, result: str
+    ) -> Dict[str, Union[BaseModel, Dict[str, Any]]]:
+        output = {
+            "pydantic": None,
+            "json": None,
+        }

         if self.output_pydantic or self.output_json:
-            model = self.output_pydantic or self.output_json
-            # try to convert task_output directly to pydantic/json
-            try:
-                # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
-                exported_result = model.model_validate_json(result)
-                if self.output_json:
-                    # type: ignore # "str" has no attribute "model_dump"
-                    return exported_result.model_dump()
-                return exported_result
-            except Exception:
-                # sometimes the response contains valid JSON in the middle of text
-                match = re.search(r"({.*})", result, re.DOTALL)
-                if match:
-                    try:
-                        # type: ignore # Item "None" of "type[BaseModel] | None" has no attribute "model_validate_json"
-                        exported_result = model.model_validate_json(match.group(0))
-                        if self.output_json:
-                            # type: ignore # "str" has no attribute "model_dump"
-                            return exported_result.model_dump()
-                        return exported_result
-                    except Exception:
-                        pass
+            model_output = self._convert_to_model(result)
+            output["pydantic"] = (
+                model_output if isinstance(model_output, BaseModel) else None
+            )
+            output["json"] = model_output if isinstance(model_output, dict) else None

-            # type: ignore # Item "None" of "Agent | None" has no attribute "function_calling_llm"
-            llm = getattr(self.agent, "function_calling_llm", None) or self.agent.llm
-            if not self._is_gpt(llm):
-                # type: ignore # Argument "model" to "PydanticSchemaParser" has incompatible type "type[BaseModel] | None"; expected "type[BaseModel]"
-                model_schema = PydanticSchemaParser(model=model).get_schema()
-                instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
+        if self.output_file:
+            self._save_output(output["raw"])

-            converter = self.agent.get_output_converter(
-                llm=llm, text=result, model=model, instructions=instructions
-            )
+        return output

-            if self.output_pydantic:
-                exported_result = converter.to_pydantic()
-            elif self.output_json:
-                exported_result = converter.to_json()
+    def _convert_to_model(self, result: str) -> Union[dict, BaseModel, str]:
+        model = self.output_pydantic or self.output_json
+        try:
+            return self._validate_model(result, model)
+        except Exception:
+            return self._handle_partial_json(result, model)

-            if isinstance(exported_result, ConverterError):
-                Printer().print(
-                    content=f"{exported_result.message} Using raw output instead.",
-                    color="red",
-                )
-                exported_result = result
+    def _validate_model(
+        self, result: str, model: Type[BaseModel]
+    ) -> Union[dict, BaseModel]:
+        exported_result = model.model_validate_json(result)
+        if self.output_json:
+            return exported_result.model_dump()
+        return exported_result

-        if self.output_file:
-            content = (
-                # type: ignore # "str" has no attribute "json"
-                exported_result if not self.output_pydantic else exported_result.json()
-            )
-            self._save_file(content)
+    def _handle_partial_json(
+        self, result: str, model: Type[BaseModel]
+    ) -> Union[dict, BaseModel, str]:
+        # sometimes the response contains valid JSON in the middle of text
+        match = re.search(r"({.*})", result, re.DOTALL)
+        if match:
+            try:
+                exported_result = model.model_validate_json(match.group(0))
+                if self.output_json:
+                    return exported_result.model_dump()
+                return exported_result
+            except Exception:
+                pass

-        return exported_result
+        return self._convert_with_instructions(result, model)
+
+    def _convert_with_instructions(
+        self, result: str, model: Type[BaseModel]
+    ) -> Union[dict, BaseModel, str]:
+        llm = self.agent.function_calling_llm or self.agent.llm
+        instructions = self._get_conversion_instructions(model, llm)
+
+        converter = Converter(
+            llm=llm, text=result, model=model, instructions=instructions
+        )
+        exported_result = (
+            converter.to_pydantic() if self.output_pydantic else converter.to_json()
+        )
+
+        if isinstance(exported_result, ConverterError):
+            Printer().print(
+                content=f"{exported_result.message} Using raw output instead.",
+                color="red",
+            )
+            return result
+
+        return exported_result
+
+    def _get_conversion_instructions(self, model: Type[BaseModel], llm: Any) -> str:
+        instructions = "I'm gonna convert this raw text into valid JSON."
+        if not self._is_gpt(llm):
+            model_schema = PydanticSchemaParser(model=model).get_schema()
+            instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"
+        return instructions
+
+    def _save_output(self, content: str) -> None:
+        directory = os.path.dirname(self.output_file)
+        if directory and not os.path.exists(directory):
+            os.makedirs(directory)
+        with open(self.output_file, "w", encoding="utf-8") as file:
+            file.write(content)

     def _is_gpt(self, llm) -> bool:
         return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
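The _handle_partial_json fallback can be seen in isolation. This sketch swaps pydantic's model_validate_json for plain json.loads so it runs without dependencies; the regex and the whole-string-then-substring strategy match the code above:

import json
import re

def extract_json(result: str):
    """Try the whole string first, then the outermost {...} span, as in _handle_partial_json."""
    try:
        return json.loads(result)
    except json.JSONDecodeError:
        match = re.search(r"({.*})", result, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass
    return result  # fall back to the raw text

assert extract_json('{"score": 7}') == {"score": 7}
assert extract_json('Sure! Here you go: {"score": 7}') == {"score": 7}
assert extract_json("no json here") == "no json here"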
@@ -1,24 +1,56 @@
-from typing import Optional, Union
+from typing import Any, Dict, Optional, Union

 from pydantic import BaseModel, Field, model_validator


+# TODO: This is a breaking change. Confirm with @joao
 class TaskOutput(BaseModel):
     """Class that represents the result of a task."""

     description: str = Field(description="Description of the task")
     summary: Optional[str] = Field(description="Summary of the task", default=None)
-    exported_output: Union[str, BaseModel] = Field(
-        description="Output of the task", default=None
+    raw_output: str = Field(description="Result of the task")
+    pydantic_output: Optional[BaseModel] = Field(
+        description="Pydantic model output", default=None
     )
+    json_output: Optional[Dict[str, Any]] = Field(
+        description="JSON output", default=None
+    )
     agent: str = Field(description="Agent that executed the task")
-    raw_output: str = Field(description="Result of the task")

     @model_validator(mode="after")
     def set_summary(self):
         """Set the summary field based on the description."""
         excerpt = " ".join(self.description.split(" ")[:10])
         self.summary = f"{excerpt}..."
         return self

-    def result(self):
-        return self.exported_output
+    # TODO: Ask @joao what is the desired behavior here
+    def result(self) -> Union[str, BaseModel, Dict[str, Any]]:
+        """Return the result of the task based on the available output."""
+        if self.pydantic_output:
+            return self.pydantic_output
+        elif self.json_output:
+            return self.json_output
+        else:
+            return self.raw_output
+
+    def __getitem__(self, key: str) -> Any:
+        """Retrieve a value from the pydantic_output or json_output based on the key."""
+        if self.pydantic_output and hasattr(self.pydantic_output, key):
+            return getattr(self.pydantic_output, key)
+        if self.json_output and key in self.json_output:
+            return self.json_output[key]
+        raise KeyError(f"Key '{key}' not found in pydantic_output or json_output")
+
+    def to_output_dict(self) -> Dict[str, Any]:
+        """Convert json_output and pydantic_output to a dictionary."""
+        output_dict = {}
+        if self.json_output:
+            output_dict.update(self.json_output)
+        if self.pydantic_output:
+            output_dict.update(self.pydantic_output.model_dump())
+        return output_dict
+
+    def __str__(self) -> str:
+        return self.raw_output
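A small exercise of the reshaped TaskOutput as defined above; Score is an illustrative model:

from pydantic import BaseModel

from crewai.tasks.task_output import TaskOutput

class Score(BaseModel):
    value: int

out = TaskOutput(
    description="Rate the draft on a 1-10 scale",
    raw_output="7",
    pydantic_output=Score(value=7),
    agent="Reviewer",
)
assert out.result() == Score(value=7)        # pydantic wins over json/raw
assert out["value"] == 7                     # __getitem__ reads model attributes
assert out.to_output_dict() == {"value": 7}
assert str(out) == "7"                       # __str__ returns the raw output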
@@ -320,7 +320,7 @@ class Telemetry:
         except Exception:
             pass

-    def end_crew(self, crew, output):
+    def end_crew(self, crew, final_string_output):
         if (self.ready) and (crew.share_crew):
             try:
                 self._add_attribute(
@@ -328,7 +328,9 @@ class Telemetry:
                     "crewai_version",
                     pkg_resources.get_distribution("crewai").version,
                 )
-                self._add_attribute(crew._execution_span, "crew_output", output)
+                self._add_attribute(
+                    crew._execution_span, "crew_output", final_string_output
+                )
                 self._add_attribute(
                     crew._execution_span,
                     "crew_tasks_output",
12  src/crewai/utilities/formatter.py  Normal file
@@ -0,0 +1,12 @@
+from typing import List
+
+from crewai.tasks.task_output import TaskOutput
+
+
+def aggregate_raw_outputs_from_task_outputs(task_outputs: List[TaskOutput]) -> str:
+    """Generate string context from the task outputs."""
+    dividers = "\n\n----------\n\n"
+
+    # Join task outputs with dividers
+    context = dividers.join(output.raw_output for output in task_outputs)
+    return context
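Given two task outputs, the helper produces the divider-joined context string used throughout crew.py:

from crewai.tasks.task_output import TaskOutput
from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs

outputs = [
    TaskOutput(description="step one", raw_output="first", agent="A"),
    TaskOutput(description="step two", raw_output="second", agent="B"),
]
print(aggregate_raw_outputs_from_task_outputs(outputs))
# first
#
# ----------
#
# second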
@@ -8,6 +8,8 @@ class Printer:
             self._print_bold_green(content)
         elif color == "bold_purple":
             self._print_bold_purple(content)
+        elif color == "yellow":
+            self._print_yellow(content)
         else:
             print(content)

@@ -22,3 +24,6 @@ class Printer:

     def _print_red(self, content):
         print("\033[91m {}\033[00m".format(content))
+
+    def _print_yellow(self, content):
+        print("\033[93m {}\033[00m".format(content))
@@ -8,18 +8,18 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess


 class TokenCalcHandler(BaseCallbackHandler):
-    model: str = ""
+    model_name: str = ""
    token_cost_process: TokenProcess

-    def __init__(self, model, token_cost_process):
-        self.model = model
+    def __init__(self, model_name, token_cost_process):
+        self.model_name = model_name
         self.token_cost_process = token_cost_process

     def on_llm_start(
         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
     ) -> None:
         try:
-            encoding = tiktoken.encoding_for_model(self.model)
+            encoding = tiktoken.encoding_for_model(self.model_name)
         except KeyError:
             encoding = tiktoken.get_encoding("cl100k_base")
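The rename aside, the try/except above is the standard tiktoken fallback. A standalone sketch (the resolved encoding names depend on the installed tiktoken version):

import tiktoken

def encoding_for(model_name: str):
    """Resolve a tokenizer for model_name, defaulting to cl100k_base when unknown."""
    try:
        return tiktoken.encoding_for_model(model_name)
    except KeyError:
        return tiktoken.get_encoding("cl100k_base")

print(len(encoding_for("gpt-4o").encode("hello world")))  # known model
print(encoding_for("some-local-model").name)              # "cl100k_base" fallback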
@@ -12,7 +12,6 @@ from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
 from crewai.agents.executor import CrewAgentExecutor
 from crewai.agents.parser import CrewAgentParser
-
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
 from crewai.utilities import RPMController
@@ -734,7 +733,7 @@ def test_agent_llm_uses_token_calc_handler_with_llm_has_model_name():

     assert len(agent1.llm.callbacks) == 1
     assert agent1.llm.callbacks[0].__class__.__name__ == "TokenCalcHandler"
-    assert agent1.llm.callbacks[0].model == "gpt-4o"
+    assert agent1.llm.callbacks[0].model_name == "gpt-4o"
     assert (
         agent1.llm.callbacks[0].token_cost_process.__class__.__name__ == "TokenProcess"
     )
1804  tests/cassettes/test_async_execution_single_task.yaml  Normal file
File diff suppressed because it is too large

4694  tests/cassettes/test_async_task_execution.yaml  Normal file
File diff suppressed because it is too large

2683  tests/cassettes/test_conditional_skips_task_if_condition_is_met.yaml  Normal file
File diff suppressed because it is too large

591  tests/cassettes/test_crew_async_kickoff.yaml  Normal file
@@ -0,0 +1,591 @@
+interactions:
+- request:
+    body: '{"messages": [{"content": "You are dog Researcher. You have a lot of experience
+      with dog.\nYour personal goal is: Express hot takes on dog.To give my best complete
+      final answer to the task use the exact following format:\n\nThought: I now can
+      give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
+      final answer must be the great and the most complete as possible, it must be
+      outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
+      Task: Give me an analysis around dog.\n\nThis is the expect criteria for your
+      final answer: 1 bullet point about dog that''s under 15 words. \n you MUST return
+      the actual complete content as the final answer, not a summary.\n\nBegin! This
+      is VERY important to you, use the tools available and give your best Final Answer,
+      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
+      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate, br
+      connection:
+      - keep-alive
+      content-length:
+      - '951'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.34.0
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.34.0
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.3
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: 'data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        I"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        now"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        can"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        give"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        a"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        great"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        answer"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        Answer"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        Dogs"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        are"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        incredibly"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        loyal"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        and"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        provide"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        unmatched"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        companionship"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        to"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
+        humans"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9gdIXYXcf53VmxfiC6Q2NBDG2bPci","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+
+        data: [DONE]
+
+
+        '
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 89d0fa4e7abf53db-ATL
+      Connection:
+      - keep-alive
+      Content-Type:
+      - text/event-stream; charset=utf-8
+      Date:
+      - Tue, 02 Jul 2024 19:17:45 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=6Xl2nvdsXT4uSfQ3C1ZK.LWKGYekVs5ErrLDZOdI.50-1719947865-1.0.1.1-6RQoTCznxe7H868MoxghRegIZaElbG_bN_jbs94hmnsnuR1P9bptoj8o2DbOSvj48ubewyvy8L16mOZHlMLw_A;
+        path=/; expires=Tue, 02-Jul-24 19:47:45 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=kPTMOkGHQp0ytgVUrm3jFNiB9I.DDI2ONPRTr6IMTeo-1719947865623-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '102'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '16000000'
+      x-ratelimit-remaining-requests:
+      - '9997'
+      x-ratelimit-remaining-tokens:
+      - '15999783'
+      x-ratelimit-reset-requests:
+      - 14ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_2c5219e228ce79f0131c497230904013
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"messages": [{"content": "You are apple Researcher. You have a lot of
+      experience with apple.\nYour personal goal is: Express hot takes on apple.To
+      give my best complete final answer to the task use the exact following format:\n\nThought:
+      I now can give a great answer\nFinal Answer: my best complete final answer to
+      the task.\nYour final answer must be the great and the most complete as possible,
+      it must be outcome described.\n\nI MUST use these formats, my job depends on
+      it!\nCurrent Task: Give me an analysis around apple.\n\nThis is the expect criteria
+      for your final answer: 1 bullet point about apple that''s under 15 words. \n
+      you MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
+      This is VERY important to you, use the tools available and give your best Final
+      Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
+      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate, br
+      connection:
+      - keep-alive
+      content-length:
+      - '961'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.34.0
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.34.0
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.3
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: 'data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
now"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
can"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
give"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
a"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
great"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
answer"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
Answer"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
Apple"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
revolution"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"izes"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
technology"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
with"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
sleek"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
designs"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
seamless"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
integration"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
and"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
innovative"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
user"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
|
||||
experiences"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXaXAntrwdA2E5Bhxgz9p7q5Nc","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 89d0fa4e7ca907e6-ATL
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 02 Jul 2024 19:17:45 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=wf2ozMjr46sG0EhuZjpiDNagwTxC05ct3Hn7Y9Rs5AI-1719947865-1.0.1.1-uckxTTr7Yfe6sv4ZznqqrGTEz9E3_Cpp7OAWBIEeNz1Smdjwijw8YV5oYPe_6W4DrEtwVzRDxaqIHlWP55O0QA;
|
||||
path=/; expires=Tue, 02-Jul-24 19:47:45 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=F9pWw4TeoPa8puOm5RN9Gp2oY0lRoN53ChZ1qFYx1S8-1719947865726-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '168'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '16000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9998'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '15999780'
|
||||
x-ratelimit-reset-requests:
|
||||
- 10ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_e6dfeda5935eae030bcc2da526234635
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"content": "You are cat Researcher. You have a lot of experience
|
||||
with cat.\nYour personal goal is: Express hot takes on cat.To give my best complete
|
||||
final answer to the task use the exact following format:\n\nThought: I now can
|
||||
give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
|
||||
final answer must be the great and the most complete as possible, it must be
|
||||
outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
|
||||
Task: Give me an analysis around cat.\n\nThis is the expect criteria for your
|
||||
final answer: 1 bullet point about cat that''s under 15 words. \n you MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
|
||||
"n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, br
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '951'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.34.0
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.34.0
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: 'data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
I"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
now"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
can"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
give"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
a"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
great"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
answer"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
Answer"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
Cats"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
are"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
master"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"ful"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
hunters"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
and"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
brilliant"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
|
||||
problem"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"-sol"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"vers"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9gdIXPfC85ZAgbI0KqvS9z396XBKw","object":"chat.completion.chunk","created":1719947865,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 89d0fa4e7ae912d7-ATL
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Tue, 02 Jul 2024 19:17:45 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=y7JNZ8WEp.q5pMXLi79ajfcI.F6MfE0GeYLw34Apkf0-1719947865-1.0.1.1-QKklGeYuOnsQROgqMs42XwqKNvW.mPrmcbtaxMnUg3eSgI7TRnRq4qPuSan0ynDt4Hd9NMuls2FR.Caa1MVr9Q;
|
||||
path=/; expires=Tue, 02-Jul-24 19:47:45 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=FVQoSgcvVyiB_o43X6y5MGYgzGojmsQqS.nPObW3JYU-1719947865679-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '132'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '16000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '15999783'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_a06bde4044d3ee75edf08f333139679c
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
585
tests/cassettes/test_crew_async_kickoff_for_each_full_ouput.yaml
Normal file
@@ -0,0 +1,585 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are dog Researcher. You have a lot of experience
      with dog.\nYour personal goal is: Express hot takes on dog.To give my best complete
      final answer to the task use the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
      final answer must be the great and the most complete as possible, it must be
      outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
      Task: Give me an analysis around dog.\n\nThis is the expect criteria for your
      final answer: 1 bullet point about dog that''s under 15 words. \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '951'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        Dogs"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        are"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        incredibly"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        loyal"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        creatures"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        and"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        make"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        excellent"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        companions"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGly5pkMQFPEoB5vefeCguR5lZg5","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89c8b85b1adac009-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 01 Jul 2024 19:14:38 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=EKr3m8uVLAQymRGlOvcrrYSniXqH.I6nlooc.HtxR58-1719861278-1.0.1.1-ShT9PH0Sv.qvbXjw_BhtziPUPaOIFBxlzXEIk_MXnfJ5PxggSkkaN25IKMZglSd3N2X.U2pWvFwywNQXiXlRnQ;
        path=/; expires=Mon, 01-Jul-24 19:44:38 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=NdoJw5c7TqrbsjEH.ABD06WhM3d1BUh2BfsxOwuclSY-1719861278155-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '110'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9998'
      x-ratelimit-remaining-tokens:
      - '15999741'
      x-ratelimit-reset-requests:
      - 11ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_266fff38f6e0a997187154e25d6615e8
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"content": "You are apple Researcher. You have a lot of
      experience with apple.\nYour personal goal is: Express hot takes on apple.To
      give my best complete final answer to the task use the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: my best complete final answer to
      the task.\nYour final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!\nCurrent Task: Give me an analysis around apple.\n\nThis is the expect criteria
      for your final answer: 1 bullet point about apple that''s under 15 words. \n
      you MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '961'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        Apple''s"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        ecosystem"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        fosters"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        seamless"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        integration"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        but"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        limit"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        user"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        flexibility"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        and"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"
        choice"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyTsxWJNVf7aQLLtKEYroHrIXk","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89c8b85b19501351-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 01 Jul 2024 19:14:38 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=R2LwekshY1B9Z5rF_RyOforiY4rLQreEXx6gOzhnZCI-1719861278-1.0.1.1-y_WShmsjsavKJEOt9Yw8nwjv05e4WdVaZGu8pYcS4z0wF9heVD.0C9W2aQodYxaWIQvkXiPm7y93ma7WCxUdBQ;
        path=/; expires=Mon, 01-Jul-24 19:44:38 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=TLDVkWpa_eP62.b4QTrhowr4J_DsXwMZ2nGDaWD4ebU-1719861278245-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '99'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999780'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_39f52bd8b06ab4ada2c16853345eb6dc
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"content": "You are cat Researcher. You have a lot of experience
      with cat.\nYour personal goal is: Express hot takes on cat.To give my best complete
      final answer to the task use the exact following format:\n\nThought: I now can
      give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
      final answer must be the great and the most complete as possible, it must be
      outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
      Task: Give me an analysis around cat.\n\nThis is the expect criteria for your
      final answer: 1 bullet point about cat that''s under 15 words. \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '951'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        Cats"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        are"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        natural"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        hunters"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        with"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        unique"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        personalities"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        and"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        strong"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        territorial"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"
        instincts"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGlyAPhJWtbWCzqHs7gAID5K4T0X","object":"chat.completion.chunk","created":1719861278,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89c8b85b1fde4546-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 01 Jul 2024 19:14:38 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=KdRY51WYOZSedMQIfG_vPmrPdO67._RkSjV0nq.khUk-1719861278-1.0.1.1-r0uJtNVxaGm2OCkQliG.tsX3vekPCDQb2IET3ywu41Igu1Qfz01rhz_WlvKIllsZlXIyd6rvHT7NxLo.UOtD7Q;
        path=/; expires=Mon, 01-Jul-24 19:44:38 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=Qqu1FBant56uM9Al0JmDl7QMk9cRcojzFUt8volvWPo-1719861278308-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '156'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999783'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_a0c6e5796af340d6720b9cfd55df703e
    status:
      code: 200
      message: OK
version: 1
File diff suppressed because it is too large

314 tests/cassettes/test_crew_full_output.yaml (new file)
@@ -0,0 +1,314 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are test role. test backstory\nYour personal
      goal is: test goalTo give my best complete final answer to the task use the
      exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
      my best complete final answer to the task.\nYour final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!\nCurrent Task: just say hi!\n\nThis is
      the expect criteria for your final answer: your greeting \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '853'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":" Hi"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvixvu2sSTjML4oN3fsoMeTHbew","object":"chat.completion.chunk","created":1719861882,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_ce0793330f","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89c8c71ccad81823-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 01 Jul 2024 19:24:42 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=uU.2MR0L4Mv3xs4DzFlWOQLVId1dJXQBlWffhr9mqxU-1719861882-1.0.1.1-JSKN2_O9iYj8QCZjy0IGiunZxvXimz5Kzv5wQJedVua5E6WIl1UvP.wguXbK0cds7ayJReYnR8v8oAN2rmtnNQ;
        path=/; expires=Mon, 01-Jul-24 19:54:42 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=yc5Q7WKbO5zoiGNQx86HpHNM3HeXi2HxCxw31lL_UuU-1719861882665-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '86'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999808'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_25d95f35048bf71e28d73fbed6576a6c
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"content": "You are test role. test backstory\nYour personal
      goal is: test goalTo give my best complete final answer to the task use the
      exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
      my best complete final answer to the task.\nYour final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!\nCurrent Task: just say hello!\n\nThis
      is the expect criteria for your final answer: your greeting \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nThis is the
      context you''re working with:\nHi!\n\nBegin! This is VERY important to you,
      use the tools available and give your best Final Answer, your job depends on
      it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"],
      "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '905'
      content-type:
      - application/json
      cookie:
      - __cf_bm=uU.2MR0L4Mv3xs4DzFlWOQLVId1dJXQBlWffhr9mqxU-1719861882-1.0.1.1-JSKN2_O9iYj8QCZjy0IGiunZxvXimz5Kzv5wQJedVua5E6WIl1UvP.wguXbK0cds7ayJReYnR8v8oAN2rmtnNQ;
        _cfuvid=yc5Q7WKbO5zoiGNQx86HpHNM3HeXi2HxCxw31lL_UuU-1719861882665-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":" Hello"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gGvjRHciTPrlyXWRGu5z5C56L10c","object":"chat.completion.chunk","created":1719861883,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_d576307f90","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89c8c7202e0f1823-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 01 Jul 2024 19:24:43 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '82'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999794'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_754b5067e8f56d5c1182dc0f57be0e45
    status:
      code: 200
      message: OK
version: 1
400750 tests/cassettes/test_crew_kickoff_for_each_full_ouput.yaml (new file)
File diff suppressed because it is too large

@@ -0,0 +1,193 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are Researcher. You''re an expert researcher,
      specialized in technology, software engineering, AI and startups. You work as
      a freelancer and is now working on doing research and analysis for a new customer.\nYour
      personal goal is: Make the best research and analysis on content about AI and
      AI agentsTo give my best complete final answer to the task use the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete
      final answer to the task.\nYour final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!\nCurrent Task: Look at the available data nd give me a
      sense on the total number of sales.\n\nThis is the expect criteria for your
      final answer: The total number of sales as an integer \n you MUST return the
      actual complete content as the final answer, not a summary.\n\nBegin! This is
      VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '1178'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" total"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" number"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" sales"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"150"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"0"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9gJkkQs40FNqD9UjPrPbDEUN4XeLR","object":"chat.completion.chunk","created":1719872734,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 89c9d0107c8abd30-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 01 Jul 2024 22:25:35 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=xIvvDveyc7bpEywphx5N4EscKoZiGAT_yDVu3aFAWZ4-1719872735-1.0.1.1-ZOUYc2kEes8fxrMFgGdVppzOh9nPbl4y1Syv73ORt38FBXePWFSTJrFZCZRU.zob6ks9nWzr2vBIZbBQdAOOGQ;
        path=/; expires=Mon, 01-Jul-24 22:55:35 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=aG1BGRRkNAyxmctM98.DLqSNJ2Cx_OQYsMRQbd03.bo-1719872735091-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '80'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '16000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '15999725'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - req_c90015b7584729268f48a8b33ff7c5ea
    status:
      code: 200
      message: OK
version: 1
235903 tests/cassettes/test_hierarchical_async_task_execution_completion.yaml (new file)
File diff suppressed because it is too large

6244 tests/cassettes/test_sequential_async_task_execution_completion.yaml (new file)
File diff suppressed because it is too large

333 tests/cassettes/test_single_task_with_async_execution.yaml (new file)
@@ -0,0 +1,333 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are Researcher. You''re an expert researcher,
      specialized in technology, software engineering, AI and startups. You work as
      a freelancer and is now working on doing research and analysis for a new customer.\nYour
      personal goal is: Make the best research and analysis on content about AI and
      AI agentsTo give my best complete final answer to the task use the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: my best complete
      final answer to the task.\nYour final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!\nCurrent Task: Generate a list of 5 interesting ideas
      to explore for an article, where each bulletpoint is under 15 words.\n\nThis
      is the expect criteria for your final answer: Bullet point list of 5 important
      events. No additional commentary. \n you MUST return the actual complete content
      as the final answer, not a summary.\n\nBegin! This is VERY important to you,
      use the tools available and give your best Final Answer, your job depends on
      it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o", "n": 1, "stop": ["\nObservation"],
      "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '1237'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.34.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.34.0
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" impact"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" agents"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" on"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" remote"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" work"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" productivity"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" Ethical"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" considerations"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-driven"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" decision"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-making"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" How"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" agents"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" are"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" transforming"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" customer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" service"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" role"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" of"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" personalized"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" learning"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" experiences"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":".\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"-"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" AI"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" advancements"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" healthcare"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":" diagnostics"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-9ce1Nupvw1SEEUL1MxkSS1S2KMYoY","object":"chat.completion.chunk","created":1718997333,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_3e7d703517","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 897653f3e8ba7ba2-ATL
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Fri, 21 Jun 2024 19:15:33 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=9ch02HraQXiYJx8jBtYzKXOBjm4nToP.1sBISDFt9Gc-1718997333-1.0.1.1-Ykz1rbMzc2Zo8VV5rBwixPedTuO8s_38psrpuLCSy2B.YIyCCXWMGI_JT5WGQVp2gacOcxjWMSVhOOY85gf9QQ;
        path=/; expires=Fri, 21-Jun-24 19:45:33 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=0srdhmUvYEBaQ2xn7BzySIPRoIiEPWzmvngtQRdnpUY-1718997333518-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '165'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '12000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '11999712'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 1ms
      x-request-id:
      - 92f00e3ecc754086e0ddf2d998f6f671
    status:
      code: 200
      message: OK
version: 1
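The streamed bodies recorded in these cassettes follow the OpenAI server-sent-events format: each "data:" line carries one JSON chunk, and the completion text is the concatenation of the choices[0].delta.content fragments, terminated by "data: [DONE]". A minimal sketch of reassembling such a recorded stream, assuming only the chunk layout visible above (the function name is illustrative, not part of crewAI):

import json

def reassemble_stream(raw: str) -> str:
    """Concatenate delta.content fragments from a recorded OpenAI stream."""
    parts = []
    for line in raw.splitlines():
        line = line.strip()
        if not line.startswith("data: ") or line == "data: [DONE]":
            continue  # skip blanks and the end-of-stream sentinel
        chunk = json.loads(line[len("data: "):])
        delta = chunk["choices"][0]["delta"]
        parts.append(delta.get("content") or "")  # role-only and stop chunks contribute nothing
    return "".join(parts)

Applied to the cassette directly above, this yields "Thought: I now can give a great answer\nFinal Answer: \n- The impact of AI agents on remote work productivity.\n..." which is the text the replayed test consumes.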
1097 tests/cassettes/test_three_task_with_async_execution.yaml (new file)
File diff suppressed because it is too large
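These cassettes are replayed rather than re-recorded at test time: the @pytest.mark.vcr(filter_headers=["authorization"]) decorators in the test diff below route HTTP traffic through the matching YAML file under tests/cassettes/. A minimal sketch of that wiring, assuming pytest-vcr is installed and a cassette named after the test exists (the test body is illustrative, modeled on the fixtures above):

import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task

@pytest.mark.vcr(filter_headers=["authorization"])  # replays tests/cassettes/test_say_hi.yaml
def test_say_hi():
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")
    task = Task(description="just say hi!", expected_output="your greeting", agent=agent)
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()  # the request is served from the cassette, not the live API
    assert "Hi" in str(result)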
@@ -1,18 +1,22 @@
|
||||
"""Test Agent creation and execution basic functionality."""
|
||||
|
||||
import json
|
||||
from concurrent.futures import Future
|
||||
from unittest import mock
|
||||
from unittest.mock import patch
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
import pydantic_core
|
||||
import pytest
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.agents.cache import CacheHandler
|
||||
from crewai.conditional_task import ConditionalTask
|
||||
from crewai.crew import Crew
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.process import Process
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.utilities import Logger, RPMController
|
||||
|
||||
ceo = Agent(
|
||||
@@ -136,11 +140,57 @@ def test_crew_creation():
|
||||
tasks=tasks,
|
||||
)
|
||||
|
||||
assert (
|
||||
crew.kickoff()
|
||||
== "1. **The Rise of AI in Healthcare**: The convergence of AI and healthcare is a promising frontier, offering unprecedented opportunities for disease diagnosis and patient outcome prediction. AI's potential to revolutionize healthcare lies in its capacity to synthesize vast amounts of data, generating precise and efficient results. This technological breakthrough, however, is not just about improving accuracy and efficiency; it's about saving lives. As we stand on the precipice of this transformative era, we must prepare for the complex challenges and ethical questions it poses, while embracing its ability to reshape healthcare as we know it.\n\n2. **Ethical Implications of AI**: As AI intertwines with our daily lives, it presents a complex web of ethical dilemmas. This fusion of technology, philosophy, and ethics is not merely academically intriguing but profoundly impacts the fabric of our society. The questions raised range from decision-making transparency to accountability, and from privacy to potential biases. As we navigate this ethical labyrinth, it is crucial to establish robust frameworks and regulations to ensure that AI serves humanity, and not the other way around.\n\n3. **AI and Data Privacy**: The rise of AI brings with it an insatiable appetite for data, spawning new debates around privacy rights. Balancing the potential benefits of AI with the right to privacy is a unique challenge that intersects technology, law, and human rights. In an increasingly digital world, where personal information forms the backbone of many services, we must grapple with these issues. It's time to redefine the concept of privacy and devise innovative solutions that ensure our digital footprints are not abused.\n\n4. **AI in Job Market**: The discourse around AI's impact on employment is a narrative of contrast, a tale of displacement and creation. On one hand, AI threatens to automate a multitude of jobs, on the other, it promises to create new roles that we cannot yet imagine. This intersection of technology, economics, and labor rights is a critical dialogue that will shape our future. As we stand at this crossroads, we must not only brace ourselves for the changes but also seize the opportunities that this technological wave brings.\n\n5. **Future of AI Agents**: The evolution of AI agents signifies a leap towards a future where AI is not just a tool, but a partner. These sophisticated AI agents, employed in customer service to personal assistants, are redefining our interactions with technology. As we gaze into the future of AI agents, we see a landscape of possibilities and challenges. This journey will be about harnessing the potential of AI agents while navigating the issues of trust, dependence, and ethical use."
|
||||
result = crew.kickoff()
|
||||
|
||||
expected_string_output = "1. **The Rise of AI in Healthcare**: The convergence of AI and healthcare is a promising frontier, offering unprecedented opportunities for disease diagnosis and patient outcome prediction. AI's potential to revolutionize healthcare lies in its capacity to synthesize vast amounts of data, generating precise and efficient results. This technological breakthrough, however, is not just about improving accuracy and efficiency; it's about saving lives. As we stand on the precipice of this transformative era, we must prepare for the complex challenges and ethical questions it poses, while embracing its ability to reshape healthcare as we know it.\n\n2. **Ethical Implications of AI**: As AI intertwines with our daily lives, it presents a complex web of ethical dilemmas. This fusion of technology, philosophy, and ethics is not merely academically intriguing but profoundly impacts the fabric of our society. The questions raised range from decision-making transparency to accountability, and from privacy to potential biases. As we navigate this ethical labyrinth, it is crucial to establish robust frameworks and regulations to ensure that AI serves humanity, and not the other way around.\n\n3. **AI and Data Privacy**: The rise of AI brings with it an insatiable appetite for data, spawning new debates around privacy rights. Balancing the potential benefits of AI with the right to privacy is a unique challenge that intersects technology, law, and human rights. In an increasingly digital world, where personal information forms the backbone of many services, we must grapple with these issues. It's time to redefine the concept of privacy and devise innovative solutions that ensure our digital footprints are not abused.\n\n4. **AI in Job Market**: The discourse around AI's impact on employment is a narrative of contrast, a tale of displacement and creation. On one hand, AI threatens to automate a multitude of jobs, on the other, it promises to create new roles that we cannot yet imagine. This intersection of technology, economics, and labor rights is a critical dialogue that will shape our future. As we stand at this crossroads, we must not only brace ourselves for the changes but also seize the opportunities that this technological wave brings.\n\n5. **Future of AI Agents**: The evolution of AI agents signifies a leap towards a future where AI is not just a tool, but a partner. These sophisticated AI agents, employed in customer service to personal assistants, are redefining our interactions with technology. As we gaze into the future of AI agents, we see a landscape of possibilities and challenges. This journey will be about harnessing the potential of AI agents while navigating the issues of trust, dependence, and ethical use."
|
||||
|
||||
assert str(result) == expected_string_output
|
||||
assert result.raw_output() == expected_string_output
|
||||
assert isinstance(result, CrewOutput)
|
||||
assert len(result.tasks_output) == len(tasks)
|
||||
assert result.result() == [expected_string_output]
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_sync_task_execution():
|
||||
from unittest.mock import patch
|
||||
|
||||
tasks = [
|
||||
Task(
|
||||
description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
|
||||
expected_output="Bullet point list of 5 important events.",
|
||||
agent=researcher,
|
||||
),
|
||||
Task(
|
||||
description="Write an amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
|
||||
expected_output="A 4 paragraph article about AI.",
|
||||
agent=writer,
|
||||
),
|
||||
]
|
||||
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
process=Process.sequential,
|
||||
tasks=tasks,
|
||||
)
|
||||
|
||||
mock_task_output = TaskOutput(
|
||||
description="Mock description", raw_output="mocked output", agent="mocked agent"
|
||||
)
|
||||
|
||||
# Because we are mocking execute_sync, we never hit the underlying _execute_core
|
||||
# which sets the output attribute of the task
|
||||
for task in tasks:
|
||||
task.output = mock_task_output
|
||||
|
||||
with patch.object(
|
||||
Task, "execute_sync", return_value=mock_task_output
|
||||
) as mock_execute_sync:
|
||||
crew.kickoff()
|
||||
|
||||
# Assert that execute_sync was called for each task
|
||||
assert mock_execute_sync.call_count == len(tasks)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_process():
@@ -157,9 +207,11 @@ def test_hierarchical_process():
        manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
        tasks=[task],
    )

    result = crew.kickoff()

    assert (
-        result
+        result.raw_output()
        == "1. 'Demystifying AI: An in-depth exploration of Artificial Intelligence for the layperson' - In this piece, we will unravel the enigma of AI, simplifying its complexities into digestible information for the everyday individual. By using relatable examples and analogies, we will journey through the neural networks and machine learning algorithms that define AI, without the jargon and convoluted explanations that often accompany such topics.\n\n2. 'The Role of AI in Startups: A Game Changer?' - Startups today are harnessing the power of AI to revolutionize their businesses. This article will delve into how AI, as an innovative force, is shaping the startup ecosystem, transforming everything from customer service to product development. We'll explore real-life case studies of startups that have leveraged AI to accelerate their growth and disrupt their respective industries.\n\n3. 'AI and Ethics: Navigating the Complex Landscape' - AI brings with it not just technological advancements, but ethical dilemmas as well. This article will engage readers in a thought-provoking discussion on the ethical implications of AI, exploring issues like bias in algorithms, privacy concerns, job displacement, and the moral responsibility of AI developers. We will also discuss potential solutions and frameworks to address these challenges.\n\n4. 'Unveiling the AI Agents: The Future of Customer Service' - AI agents are poised to reshape the customer service landscape, offering businesses the ability to provide round-the-clock support and personalized experiences. In this article, we'll dive deep into the world of AI agents, examining how they work, their benefits and limitations, and how they're set to redefine customer interactions in the digital age.\n\n5. 'From Science Fiction to Reality: AI in Everyday Life' - AI, once a concept limited to the realm of sci-fi, has now permeated our daily lives. This article will highlight the ubiquitous presence of AI, from voice assistants and recommendation algorithms, to autonomous vehicles and smart homes. We'll explore how AI, in its various forms, is transforming our everyday experiences, making the future seem a lot closer than we imagined."
    )

@@ -194,8 +246,10 @@ def test_crew_with_delegating_agents():
        tasks=tasks,
    )

    result = crew.kickoff()

    assert (
-        crew.kickoff()
+        result.raw_output()
        == "AI Agents, simply put, are intelligent systems that can perceive their environment and take actions to reach specific goals. Imagine them as digital assistants that can learn, adapt and make decisions. They operate in the realms of software or hardware, like a chatbot on a website or a self-driving car. The key to their intelligence is their ability to learn from their experiences, making them better at their tasks over time. In today's interconnected world, AI agents are transforming our lives. They enhance customer service experiences, streamline business processes, and even predict trends in data. Vehicles equipped with AI agents are making transportation safer. In healthcare, AI agents are helping to diagnose diseases, personalizing treatment plans, and monitoring patient health. As we embrace the digital era, these AI agents are not just important, they're becoming indispensable, shaping a future where technology works intuitively and intelligently to meet our needs."
    )

@@ -384,16 +438,18 @@ def test_crew_full_ouput():

    result = crew.kickoff()

-    assert result == {
-        "final_output": "Hello!",
-        "tasks_outputs": [task1.output, task2.output],
-        "usage_metrics": {
-            "total_tokens": 517,
-            "prompt_tokens": 466,
-            "completion_tokens": 51,
-            "successful_requests": 3,
-        },
+    expected_usage_metrics = {
+        "total_tokens": 348,
+        "prompt_tokens": 314,
+        "completion_tokens": 34,
+        "successful_requests": 2,
+    }
+    print(result.output)
+    assert result.token_usage == expected_usage_metrics
+    expected_final_string_output = "Hello!"
+    assert result.tasks_output == [task1.output, task2.output]
+    assert result.result() == expected_final_string_output
+    assert result.raw_output() == expected_final_string_output


def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
@@ -416,11 +472,58 @@ def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
    assert agent._rpm_controller is None


-def test_async_task_execution():
-    import threading
-    from unittest.mock import patch
-    from crewai.tasks.task_output import TaskOutput
+"""
+Future tests:
+TODO: 1 async task, 1 sync task. Make sure sync task waits for async to finish before starting.
+TODO: 3 async tasks, 1 sync task. Make sure sync task waits for async to finish before starting.
+TODO: 1 sync task, 1 async task. Make sure we wait for result from async before finishing crew.
+
+TODO: 3 async tasks, 1 sync task. Make sure context from all 3 async tasks is passed to sync task.
+TODO: 3 async tasks, 1 sync task. Pass in context from only 1 async task to sync task.
+
+TODO: Test pydantic output of CrewOutput and test type in CrewOutput result
+TODO: Test json output of CrewOutput and test type in CrewOutput result
+
+TODO: TEST THE SAME THING BUT WITH HIERARCHICAL PROCESS
+"""


@pytest.mark.vcr(filter_headers=["authorization"])
def test_sequential_async_task_execution_completion():
    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher,
        async_execution=True,
    )
    list_important_history = Task(
        description="Research the history of AI and give me the 5 most important events that shaped the technology.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher,
        async_execution=True,
    )
    write_article = Task(
        description="Write an article about the history of AI and its most important events.",
        expected_output="A 4 paragraph article about AI.",
        agent=writer,
        context=[list_ideas, list_important_history],
    )

    sequential_crew = Crew(
        agents=[researcher, writer],
        process=Process.sequential,
        tasks=[list_ideas, list_important_history, write_article],
    )

    sequential_result = sequential_crew.kickoff()
    assert sequential_result.raw_output().startswith(
        "**The Evolution of Artificial Intelligence: A Journey Through Milestones**"
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_async_task_execution_completion():
    from langchain_openai import ChatOpenAI

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
@@ -441,31 +544,437 @@ def test_async_task_execution():
        context=[list_ideas, list_important_history],
    )

    hierarchical_crew = Crew(
        agents=[researcher, writer],
        process=Process.hierarchical,
        tasks=[list_ideas, list_important_history, write_article],
        manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
    )

    hierarchical_result = hierarchical_crew.kickoff()

    assert hierarchical_result.raw_output().startswith(
        "The history of artificial intelligence (AI) is a fascinating journey"
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_single_task_with_async_execution():
    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        allow_delegation=False,
    )

    list_ideas = Task(
        description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
        expected_output="Bullet point list of 5 important events. No additional commentary.",
        agent=researcher_agent,
        async_execution=True,
    )

    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
    )

    result = crew.kickoff()
    print(result.raw_output())
    assert result.raw_output().startswith(
        "- The impact of AI agents on remote work productivity."
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_three_task_with_async_execution():
    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on doing research and analysis for a new customer.",
        allow_delegation=False,
    )

    bullet_list = Task(
        description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
        expected_output="Bullet point list of 5 important events. No additional commentary.",
        agent=researcher_agent,
        async_execution=True,
    )
    numbered_list = Task(
        description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
        expected_output="Numbered list of 5 important events. No additional commentary.",
        agent=researcher_agent,
        async_execution=True,
    )
    letter_list = Task(
        description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
        expected_output="Numbered list using [A), B), C)] list of 5 important events. No additional commentary.",
        agent=researcher_agent,
        async_execution=True,
    )

    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[bullet_list, numbered_list, letter_list],
    )

    # Expected result is that we are going to concatenate the output from each async task.
    # Because we add a buffer between each task, we should see a "----------" string
    # after the first and second task in the final output.
    result = crew.kickoff()
    assert result.raw_output().count("\n\n----------\n\n") == 2
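
For reference, a sketch of the shape that aggregated raw output is assumed to take (the idea text below is invented for illustration):

    # Illustrative only: three async task outputs joined by the divider, e.g.
    #   "- idea one\n\n----------\n\n1. idea one\n\n----------\n\nA) idea one"
    # which is why exactly two dividers are expected for three tasks.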


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_crew_async_kickoff():
    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task], full_output=True)
    results = await crew.kickoff_for_each_async(inputs=inputs)

    assert len(results) == len(inputs)
    for result in results:
        # Assert that all required keys are in usage_metrics and their values are not None
        for key in [
            "total_tokens",
            "prompt_tokens",
            "completion_tokens",
            "successful_requests",
        ]:
            assert key in result.token_usage
            # TODO: FIX THIS WHEN USAGE METRICS ARE RE-DONE
            # assert result.token_usage[key] > 0


@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_task_execution_call_count():
    from unittest.mock import MagicMock, patch

    list_ideas = Task(
        description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher,
        async_execution=True,
    )
    list_important_history = Task(
        description="Research the history of AI and give me the 5 most important events that shaped the technology.",
        expected_output="Bullet point list of 5 important events.",
        agent=researcher,
        async_execution=True,
    )
    write_article = Task(
        description="Write an article about the history of AI and its most important events.",
        expected_output="A 4 paragraph article about AI.",
        agent=writer,
    )

    crew = Crew(
        agents=[researcher, writer],
        process=Process.sequential,
        tasks=[list_ideas, list_important_history, write_article],
    )

-    with patch.object(Agent, "execute_task") as execute:
-        execute.return_value = "ok"
-        with patch.object(threading.Thread, "start") as start:
-            thread = threading.Thread(target=lambda: None, args=()).start()
-            start.return_value = thread
-            with patch.object(threading.Thread, "join", wraps=thread.join()) as join:
-                list_ideas.output = TaskOutput(
-                    description="A 4 paragraph article about AI.",
-                    raw_output="ok",
-                    agent="writer",
-                )
-                list_important_history.output = TaskOutput(
-                    description="A 4 paragraph article about AI.",
-                    raw_output="ok",
-                    agent="writer",
-                )
-                crew.kickoff()
-                start.assert_called()
-                join.assert_called()
+    # Create a valid TaskOutput instance to mock the return value
+    mock_task_output = TaskOutput(
+        description="Mock description", raw_output="mocked output", agent="mocked agent"
+    )
+
+    # Create a MagicMock Future instance
+    mock_future = MagicMock(spec=Future)
+    mock_future.result.return_value = mock_task_output
+
+    # Directly set the output attribute for each task
+    list_ideas.output = mock_task_output
+    list_important_history.output = mock_task_output
+    write_article.output = mock_task_output
+
+    with patch.object(
+        Task, "execute_sync", return_value=mock_task_output
+    ) as mock_execute_sync, patch.object(
+        Task, "execute_async", return_value=mock_future
+    ) as mock_execute_async:
+        crew.kickoff()
+
+        assert mock_execute_async.call_count == 2
+        assert mock_execute_sync.call_count == 1


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_single_input():
    """Tests if kickoff_for_each works with a single input."""
    from unittest.mock import patch

    inputs = [{"topic": "dog"}]
    expected_outputs = ["Dogs are loyal companions and popular pets."]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    with patch.object(Agent, "execute_task") as mock_execute_task:
        mock_execute_task.side_effect = expected_outputs
        crew = Crew(agents=[agent], tasks=[task])
        results = crew.kickoff_for_each(inputs=inputs)

    assert len(results) == 1
    print("RESULT:", results)
    for result in results:
        assert result == expected_outputs[0]


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_multiple_inputs():
    """Tests if kickoff_for_each works with multiple inputs."""
    from unittest.mock import patch

    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]
    expected_outputs = [
        "Dogs are loyal companions and popular pets.",
        "Cats are independent and low-maintenance pets.",
        "Apples are a rich source of dietary fiber and vitamin C.",
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    with patch.object(Agent, "execute_task") as mock_execute_task:
        mock_execute_task.side_effect = expected_outputs
        crew = Crew(agents=[agent], tasks=[task])
        results = crew.kickoff_for_each(inputs=inputs)

    assert len(results) == len(inputs)
    for i, res in enumerate(results):
        assert res == expected_outputs[i]


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_empty_input():
    """Tests if kickoff_for_each handles an empty input list."""
    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])
    results = crew.kickoff_for_each(inputs=[])
    assert results == []


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
    """Tests if kickoff_for_each raises TypeError for invalid input types."""

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])

    with pytest.raises(TypeError):
        # Pass a string instead of a list
        crew.kickoff_for_each("invalid input")


@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_error_handling():
    """Tests error handling in kickoff_for_each when kickoff raises an error."""
    from unittest.mock import patch

    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]
    expected_outputs = [
        "Dogs are loyal companions and popular pets.",
        "Cats are independent and low-maintenance pets.",
        "Apples are a rich source of dietary fiber and vitamin C.",
    ]
    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])

    with patch.object(Crew, "kickoff") as mock_kickoff:
        mock_kickoff.side_effect = expected_outputs[:2] + [
            Exception("Simulated kickoff error")
        ]
        with pytest.raises(Exception, match="Simulated kickoff error"):
            crew.kickoff_for_each(inputs=inputs)


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_kickoff_async_basic_functionality_and_output():
    """Tests the basic functionality and output of kickoff_async."""
    from unittest.mock import patch

    inputs = {"topic": "dog"}

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    # Create the crew
    crew = Crew(
        agents=[agent],
        tasks=[task],
    )

    expected_output = "This is a sample output from kickoff."
    with patch.object(Crew, "kickoff", return_value=expected_output) as mock_kickoff:
        result = await crew.kickoff_async(inputs)

        assert isinstance(result, str), "Result should be a string"
        assert result == expected_output, "Result should match expected output"
        mock_kickoff.assert_called_once_with(inputs)


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_basic_functionality_and_output():
    """Tests the basic functionality and output of kickoff_for_each_async."""
    from unittest.mock import patch

    inputs = [
        {"topic": "dog"},
        {"topic": "cat"},
        {"topic": "apple"},
    ]

    # Define expected outputs for each input
    expected_outputs = [
        "Dogs are loyal companions and popular pets.",
        "Cats are independent and low-maintenance pets.",
        "Apples are a rich source of dietary fiber and vitamin C.",
    ]

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    with patch.object(
        Crew, "kickoff_async", side_effect=expected_outputs
    ) as mock_kickoff_async:
        crew = Crew(agents=[agent], tasks=[task])

        results = await crew.kickoff_for_each_async(inputs)

        assert len(results) == len(inputs)
        assert results == expected_outputs
        for input_data in inputs:
            mock_kickoff_async.assert_any_call(inputs=input_data)


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_empty_input():
    """Tests if kickoff_for_each_async handles an empty input list."""

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=agent,
    )

    # Create the crew
    crew = Crew(
        agents=[agent],
        tasks=[task],
    )

    # Call the function we are testing
    results = await crew.kickoff_for_each_async([])

    # Assertion
    assert results == [], "Result should be an empty list when input is empty"


def test_set_agents_step_callback():
@@ -597,7 +1106,7 @@ def test_task_with_no_arguments():
    crew = Crew(agents=[researcher], tasks=[task])

    result = crew.kickoff()
-    assert result == "75"
+    assert result.raw_output() == "75"


def test_code_execution_flag_adds_code_tool_upon_kickoff():
@@ -625,8 +1134,6 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():

@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegation_is_not_enabled_if_there_are_only_one_agent():
-    from unittest.mock import patch

    researcher = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
@@ -641,10 +1148,9 @@ def test_delegation_is_not_enabled_if_there_are_only_one_agent():
    )

    crew = Crew(agents=[researcher], tasks=[task])
-    with patch.object(Task, "execute") as execute:
-        execute.return_value = "ok"
-        crew.kickoff()
-        assert task.tools == []
+
+    crew.kickoff()
+    assert task.tools == []


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -662,7 +1168,7 @@ def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():

    result = crew.kickoff()
    assert (
-        result
+        result.raw_output()
        == "Howdy! I hope this message finds you well and brings a smile to your face. Have a fantastic day!"
    )
    assert len(agent.tools) == 0
@@ -682,13 +1188,93 @@ def test_agent_usage_metrics_are_captured_for_sequential_process():
    crew = Crew(agents=[agent], tasks=[task])

    result = crew.kickoff()
-    assert result == "Howdy!"
-    assert crew.usage_metrics == {
-        "completion_tokens": 17,
-        "prompt_tokens": 158,
-        "successful_requests": 1,
-        "total_tokens": 175,
-    }
+    assert result.raw_output() == "Howdy!"
+
+    required_keys = [
+        "total_tokens",
+        "prompt_tokens",
+        "completion_tokens",
+        "successful_requests",
+    ]
+    for key in required_keys:
+        assert key in crew.usage_metrics, f"Key '{key}' not found in usage_metrics"
+        assert crew.usage_metrics[key] > 0, f"Value for key '{key}' is zero"


def test_conditional_task_requirement_breaks_when_singular_conditional_task():
    task = ConditionalTask(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        Crew(
            agents=[researcher, writer],
            tasks=[task],
        )
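
For context, a minimal sketch of the condition callable the conditional tests below rely on, assuming it receives the previous task's TaskOutput and returns a bool; the predicate and task are hypothetical:

def has_enough_ideas(previous_output: TaskOutput) -> bool:
    # Hypothetical predicate: run the task only if the previous output
    # contains at least 5 non-empty lines.
    return len(previous_output.raw_output.splitlines()) >= 5

maybe_expand = ConditionalTask(
    description="Expand on each idea above.",
    expected_output="One paragraph per idea.",
    condition=has_enough_ideas,
    agent=writer,
)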


@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_should_not_execute():
    task1 = Task(description="Return hello", expected_output="say hi", agent=researcher)

    condition_mock = MagicMock(return_value=False)
    task2 = ConditionalTask(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
        condition=condition_mock,
        agent=writer,
    )
    crew_met = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2],
    )
    with patch.object(Task, "execute_sync") as mock_execute_sync:
        mock_execute_sync.return_value = TaskOutput(
            description="Task 1 description",
            raw_output="Task 1 output",
            agent="Researcher",
        )

        result = crew_met.kickoff()
        assert mock_execute_sync.call_count == 1

        assert condition_mock.call_count == 1
        assert condition_mock() is False

        assert task2.output is None
        assert result.raw_output().startswith("Task 1 output")


@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_should_execute():
    task1 = Task(description="Return hello", expected_output="say hi", agent=researcher)

    condition_mock = MagicMock(
        return_value=True
    )  # should execute this conditional task
    task2 = ConditionalTask(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
        condition=condition_mock,
        agent=writer,
    )
    crew_met = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2],
    )
    with patch.object(Task, "execute_sync") as mock_execute_sync:
        mock_execute_sync.return_value = TaskOutput(
            description="Task 1 description",
            raw_output="Task 1 output",
            agent="Researcher",
        )

        crew_met.kickoff()

        assert condition_mock.call_count == 1
        assert condition_mock() is True
        assert mock_execute_sync.call_count == 2


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -712,14 +1298,17 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
    )

    result = crew.kickoff()
-    assert result == '"Howdy!"'
+    assert result.raw_output() == '"Howdy!"'

-    assert crew.usage_metrics == {
-        "total_tokens": 1616,
-        "prompt_tokens": 1333,
-        "completion_tokens": 283,
-        "successful_requests": 3,
-    }
+    required_keys = [
+        "total_tokens",
+        "prompt_tokens",
+        "completion_tokens",
+        "successful_requests",
+    ]
+    for key in required_keys:
+        assert key in crew.usage_metrics, f"Key '{key}' not found in usage_metrics"
+        assert crew.usage_metrics[key] > 0, f"Value for key '{key}' is zero"


def test_crew_inputs_interpolate_both_agents_and_tasks():
@@ -775,9 +1364,81 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
        interpolate_task_inputs.assert_called()


-def test_task_callback_on_crew():
+def test_crew_does_not_interpolate_without_inputs():
    from unittest.mock import patch

    agent = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )

    task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="{points} bullet points about {topic}.",
        agent=agent,
    )

    crew = Crew(agents=[agent], tasks=[task])

    with patch.object(Agent, "interpolate_inputs") as interpolate_agent_inputs:
        with patch.object(Task, "interpolate_inputs") as interpolate_task_inputs:
            crew.kickoff()
            interpolate_agent_inputs.assert_not_called()
            interpolate_task_inputs.assert_not_called()


# TODO: Ask @joao if we want to start throwing errors if inputs are not provided
# def test_crew_partial_inputs():
#     agent = Agent(
#         role="{topic} Researcher",
#         goal="Express hot takes on {topic}.",
#         backstory="You have a lot of experience with {topic}.",
#     )

#     task = Task(
#         description="Give me an analysis around {topic}.",
#         expected_output="{points} bullet points about {topic}.",
#     )

#     crew = Crew(agents=[agent], tasks=[task], inputs={"topic": "AI"})
#     inputs = {"topic": "AI"}
#     crew._interpolate_inputs(inputs=inputs)  # Manual call for now

#     assert crew.tasks[0].description == "Give me an analysis around AI."
#     assert crew.tasks[0].expected_output == "{points} bullet points about AI."
#     assert crew.agents[0].role == "AI Researcher"
#     assert crew.agents[0].goal == "Express hot takes on AI."
#     assert crew.agents[0].backstory == "You have a lot of experience with AI."


# TODO: If we do want to throw errors when inputs are missing, add in this test.
# def test_crew_invalid_inputs():
#     agent = Agent(
#         role="{topic} Researcher",
#         goal="Express hot takes on {topic}.",
#         backstory="You have a lot of experience with {topic}.",
#     )

#     task = Task(
#         description="Give me an analysis around {topic}.",
#         expected_output="{points} bullet points about {topic}.",
#     )

#     crew = Crew(agents=[agent], tasks=[task], inputs={"subject": "AI"})
#     inputs = {"subject": "AI"}
#     crew._interpolate_inputs(inputs=inputs)  # Manual call for now

#     assert crew.tasks[0].description == "Give me an analysis around {topic}."
#     assert crew.tasks[0].expected_output == "{points} bullet points about {topic}."
#     assert crew.agents[0].role == "{topic} Researcher"
#     assert crew.agents[0].goal == "Express hot takes on {topic}."
#     assert crew.agents[0].backstory == "You have a lot of experience with {topic}."


def test_task_callback_on_crew():
    from unittest.mock import MagicMock, patch

    researcher_agent = Agent(
        role="Researcher",
        goal="Make the best research and analysis on content about AI and AI agents",
@@ -792,17 +1453,23 @@ def test_task_callback_on_crew():
        async_execution=True,
    )

+    mock_callback = MagicMock()
+
    crew = Crew(
        agents=[researcher_agent],
        process=Process.sequential,
        tasks=[list_ideas],
-        task_callback=lambda: None,
+        task_callback=mock_callback,
    )

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
        crew.kickoff()

        assert list_ideas.callback is not None
+        mock_callback.assert_called_once()
+        args, _ = mock_callback.call_args
+        assert isinstance(args[0], TaskOutput)


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -874,7 +1541,7 @@ def test_tools_with_custom_caching():
        input={"first_number": 2, "second_number": 6},
        output=12,
    )
-    assert result == "3"
+    assert result.raw_output() == "3"


@pytest.mark.vcr(filter_headers=["authorization"])
@@ -972,10 +1639,20 @@ def test_manager_agent():
        tasks=[task],
    )

-    with patch.object(Task, "execute") as execute:
+    mock_task_output = TaskOutput(
+        description="Mock description", raw_output="mocked output", agent="mocked agent"
+    )
+
+    # Because we are mocking execute_sync, we never hit the underlying _execute_core
+    # which sets the output attribute of the task
+    task.output = mock_task_output
+
+    with patch.object(
+        Task, "execute_sync", return_value=mock_task_output
+    ) as mock_execute_sync:
        crew.kickoff()
        assert manager.allow_delegation is True
-        execute.assert_called()
+        mock_execute_sync.assert_called()


def test_manager_agent_in_agents_raises_exception():
@@ -1134,3 +1811,8 @@ def test__setup_for_training():

    for agent in agents:
        assert agent.allow_delegation is False


+# TODO: TEST EXPORT OUTPUT TASK WITH PYDANTIC
+# TODO: TEST EXPORT OUTPUT TASK WITH JSON
+# TODO: TEST EXPORT OUTPUT TASK CALLBACK
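
A rough sketch of the first export TODO, kept commented out like the neighboring drafts; it assumes Task accepts an output_pydantic model class and that the parsed model is exposed on the task output as exported_output, both of which are assumptions here, not confirmed API:

# from pydantic import BaseModel
#
# class IdeaList(BaseModel):
#     ideas: list[str]
#
# def test_task_output_exported_as_pydantic():
#     task = Task(
#         description="List 5 article ideas.",
#         expected_output="JSON with an `ideas` array.",
#         agent=researcher,
#         output_pydantic=IdeaList,  # assumed field name
#     )
#     crew = Crew(agents=[researcher], tasks=[task])
#     crew.kickoff()
#     assert isinstance(task.output.exported_output, IdeaList)  # assumed attribute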

@@ -80,7 +80,7 @@ def test_task_prompt_includes_expected_output():

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
-        task.execute()
+        task.execute_sync()
        execute.assert_called_once_with(task=task, context=None, tools=[])


@@ -103,7 +103,7 @@ def test_task_callback():

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "ok"
-        task.execute()
+        task.execute_sync()
        task_completed.assert_called_once_with(task.output)


@@ -126,7 +126,7 @@ def test_task_callback_returns_task_ouput():

    with patch.object(Agent, "execute_task") as execute:
        execute.return_value = "exported_ok"
-        task.execute()
+        task.execute_sync()
        # Ensure the callback is called with a TaskOutput object serialized to JSON
        task_completed.assert_called_once()
        callback_data = task_completed.call_args[0][0]
@@ -161,7 +161,7 @@ def test_execute_with_agent():
    )

    with patch.object(Agent, "execute_task", return_value="ok") as execute:
-        task.execute(agent=researcher)
+        task.execute_sync(agent=researcher)
        execute.assert_called_once_with(task=task, context=None, tools=[])


@@ -181,7 +181,7 @@ def test_async_execution():
    )

    with patch.object(Agent, "execute_task", return_value="ok") as execute:
-        task.execute(agent=researcher)
+        task.execute_async(agent=researcher)
        execute.assert_called_once_with(task=task, context=None, tools=[])


@@ -525,3 +525,14 @@ def test_interpolate_inputs():
        == "Give me a list of 5 interesting ideas about ML to explore for an article, what makes them unique and interesting."
    )
    assert task.expected_output == "Bullet point list of 5 interesting ideas about ML."


"""
|
||||
TODO: TEST SYNC
|
||||
- Verify return type
|
||||
"""
|
||||
|
||||
"""
|
||||
TODO: TEST ASYNC
|
||||
- Verify return type
|
||||
"""