Compare commits


4 Commits

Author | SHA1 | Message | Date
Brandon Hancock (bhancock_ai) | c9cf47e6ff | Merge branch 'main' into fix/_should_force_answer | 2025-01-07 13:29:07 -05:00
Shahar Yair | 7fd71749f4 | Merge branch 'main' into fix/_should_force_answer | 2025-01-02 15:24:23 +02:00
Shahar Yair | ea413ae03b | Merge branch 'main' into fix/_should_force_answer | 2024-12-28 11:17:46 +02:00
Shahar Yair | f1299f484d | fix _should_force_answer bug | 2024-12-28 11:10:16 +02:00
17 changed files with 32214 additions and 2259 deletions

View File

@@ -1,60 +1,32 @@
 name: Run Tests
 
-on:
-  pull_request:
-  push:
-    branches:
-      - main
+on: [pull_request]
 
 permissions:
   contents: write
 
-env:
-  OPENAI_API_KEY: fake-api-key
-
 jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
+    env:
+      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+      MODEL: gpt-4o-mini
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
 
-      - name: Install UV
+      - name: Install uv
         uses: astral-sh/setup-uv@v3
         with:
           enable-cache: true
 
       - name: Set up Python
         run: uv python install 3.12.8
 
       - name: Install the project
         run: uv sync --dev --all-extras
 
-      - name: Run General Tests
-        run: uv run pytest tests -k "not main_branch_tests" -vv
+      - name: Run tests
+        run: uv run pytest tests -vv
 
-  main_branch_tests:
-    if: github.ref == 'refs/heads/main'
-    runs-on: ubuntu-latest
-    needs: tests
-    timeout-minutes: 15
-    env:
-      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-      - name: Install UV
-        uses: astral-sh/setup-uv@v3
-        with:
-          enable-cache: true
-      - name: Set up Python
-        run: uv python install 3.12.8
-      - name: Install the project
-        run: uv sync --dev --all-extras
-      - name: Run Main Branch Specific Tests
-        run: uv run pytest tests/main_branch_tests -vv
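One detail worth unpacking in the job split deleted above: pytest's -k expression filters by substring against a test's name and its parents' (module/package) names, which is how the two jobs partition the suite. A minimal illustration with hypothetical test names, not taken from this repository:

# test_sample.py -- hypothetical tests illustrating the -k partition:
#   uv run pytest test_sample.py -k "not main_branch_tests" -vv  -> runs test_fast only
#   uv run pytest test_sample.py -vv                              -> runs both
def test_fast():
    assert 1 + 1 == 2

def test_main_branch_tests_smoke():
    # The name contains "main_branch_tests", so the -k filter above deselects it.
    assert True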

View File

@@ -101,8 +101,6 @@ from crewai_tools import SerperDevTool
 class LatestAiDevelopmentCrew():
     """LatestAiDevelopment crew"""
 
-    agents_config = "config/agents.yaml"
-
     @agent
     def researcher(self) -> Agent:
         return Agent(

View File

@@ -19,10 +19,15 @@ class CrewAgentExecutorMixin:
     agent: Optional["BaseAgent"]
     task: Optional["Task"]
     iterations: int
+    have_forced_answer: bool
     max_iter: int
     _i18n: I18N
     _printer: Printer = Printer()
 
+    def _should_force_answer(self) -> bool:
+        """Determine if a forced answer is required based on iteration count."""
+        return self.iterations >= self.max_iter
+
     def _create_short_term_memory(self, output) -> None:
         """Create and save a short-term memory item if conditions are met."""
         if (
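Read in isolation, the _should_force_answer check the branch restores is a pure threshold test on the iteration counter. A self-contained toy (illustrative names only, not the crewAI classes) shows when it flips:

# Toy illustration of the restored threshold: forcing kicks in once the
# iteration counter reaches max_iter. Standalone sketch, not crewAI code.
class ToyExecutor:
    def __init__(self, max_iter):
        self.iterations = 0
        self.max_iter = max_iter

    def _should_force_answer(self) -> bool:
        return self.iterations >= self.max_iter

ex = ToyExecutor(max_iter=2)
for _ in range(3):
    print(ex.iterations, ex._should_force_answer())  # 0 False / 1 False / 2 True
    ex.iterations += 1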

View File

@@ -1,7 +1,7 @@
 import json
 import re
 from dataclasses import dataclass
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Dict, List, Union
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
@@ -50,7 +50,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         original_tools: List[Any] = [],
         function_calling_llm: Any = None,
         respect_context_window: bool = False,
-        request_within_rpm_limit: Optional[Callable[[], bool]] = None,
+        request_within_rpm_limit: Any = None,
         callbacks: List[Any] = [],
     ):
         self._i18n: I18N = I18N()
@@ -77,6 +77,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
+        self.have_forced_answer = False
         self.tool_name_to_tool_map: Dict[str, BaseTool] = {
             tool.name: tool for tool in self.tools
         }
@@ -107,151 +108,106 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}
 
-    def _invoke_loop(self):
-        """
-        Main loop to invoke the agent's thought process until it reaches a conclusion
-        or the maximum number of iterations is reached.
-        """
-        formatted_answer = None
-        while not isinstance(formatted_answer, AgentFinish):
-            try:
-                if self._has_reached_max_iterations():
-                    formatted_answer = self._handle_max_iterations_exceeded(
-                        formatted_answer
-                    )
-                    break
-
-                self._enforce_rpm_limit()
-
-                answer = self._get_llm_response()
-                formatted_answer = self._process_llm_response(answer)
-
-                if isinstance(formatted_answer, AgentAction):
-                    tool_result = self._execute_tool_and_check_finality(
-                        formatted_answer
-                    )
-                    formatted_answer = self._handle_agent_action(
-                        formatted_answer, tool_result
-                    )
-
-                self._invoke_step_callback(formatted_answer)
-                self._append_message(formatted_answer.text, role="assistant")
-
-            except OutputParserException as e:
-                formatted_answer = self._handle_output_parser_exception(e)
-
-            except Exception as e:
-                if self._is_context_length_exceeded(e):
-                    self._handle_context_length()
-                    continue
-                else:
-                    raise e
-
-        self._show_logs(formatted_answer)
-        return formatted_answer
-
-    def _has_reached_max_iterations(self) -> bool:
-        """Check if the maximum number of iterations has been reached."""
-        return self.iterations >= self.max_iter
-
-    def _enforce_rpm_limit(self) -> None:
-        """Enforce the requests per minute (RPM) limit if applicable."""
-        if self.request_within_rpm_limit:
-            self.request_within_rpm_limit()
-
-    def _get_llm_response(self) -> str:
-        """Call the LLM and return the response, handling any invalid responses."""
-        answer = self.llm.call(
-            self.messages,
-            callbacks=self.callbacks,
-        )
-
-        if not answer:
-            self._printer.print(
-                content="Received None or empty response from LLM call.",
-                color="red",
-            )
-            raise ValueError("Invalid response from LLM call - None or empty.")
-
-        return answer
-
-    def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
-        """Process the LLM response and format it into an AgentAction or AgentFinish."""
-        if not self.use_stop_words:
-            try:
-                # Preliminary parsing to check for errors.
-                self._format_answer(answer)
-            except OutputParserException as e:
-                if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
-                    answer = answer.split("Observation:")[0].strip()
-
-        self.iterations += 1
-        return self._format_answer(answer)
-
-    def _handle_agent_action(
-        self, formatted_answer: AgentAction, tool_result: ToolResult
-    ) -> Union[AgentAction, AgentFinish]:
-        """Handle the AgentAction, execute tools, and process the results."""
-        add_image_tool = self._i18n.tools("add_image")
-        if (
-            isinstance(add_image_tool, dict)
-            and formatted_answer.tool.casefold().strip()
-            == add_image_tool.get("name", "").casefold().strip()
-        ):
-            self.messages.append(tool_result.result)
-            return formatted_answer  # Continue the loop
-
-        if self.step_callback:
-            self.step_callback(tool_result)
-
-        formatted_answer.text += f"\nObservation: {tool_result.result}"
-        formatted_answer.result = tool_result.result
-
-        if tool_result.result_as_answer:
-            return AgentFinish(
-                thought="",
-                output=tool_result.result,
-                text=formatted_answer.text,
-            )
-
-        self._show_logs(formatted_answer)
-        return formatted_answer
-
-    def _invoke_step_callback(self, formatted_answer) -> None:
-        """Invoke the step callback if it exists."""
-        if self.step_callback:
-            self.step_callback(formatted_answer)
-
-    def _append_message(self, text: str, role: str = "assistant") -> None:
-        """Append a message to the message list with the given role."""
-        self.messages.append(self._format_msg(text, role=role))
-
-    def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
-        """Handle OutputParserException by updating messages and formatted_answer."""
-        self.messages.append({"role": "user", "content": e.error})
-
-        formatted_answer = AgentAction(
-            text=e.error,
-            tool="",
-            tool_input="",
-            thought="",
-        )
-
-        if self.iterations > self.log_error_after:
-            self._printer.print(
-                content=f"Error parsing LLM output, agent will retry: {e.error}",
-                color="red",
-            )
-
-        return formatted_answer
-
-    def _is_context_length_exceeded(self, exception: Exception) -> bool:
-        """Check if the exception is due to context length exceeding."""
-        return LLMContextLengthExceededException(
-            str(exception)
-        )._is_context_limit_error(str(exception))
+    def _invoke_loop(self, formatted_answer=None):
+        try:
+            while not isinstance(formatted_answer, AgentFinish):
+                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
+                    answer = self.llm.call(
+                        self.messages,
+                        callbacks=self.callbacks,
+                    )
+
+                    if answer is None or answer == "":
+                        self._printer.print(
+                            content="Received None or empty response from LLM call.",
+                            color="red",
+                        )
+                        raise ValueError(
+                            "Invalid response from LLM call - None or empty."
+                        )
+
+                    if not self.use_stop_words:
+                        try:
+                            self._format_answer(answer)
+                        except OutputParserException as e:
+                            if (
+                                FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
+                                in e.error
+                            ):
+                                answer = answer.split("Observation:")[0].strip()
+
+                    self.iterations += 1
+                    formatted_answer = self._format_answer(answer)
+
+                    if isinstance(formatted_answer, AgentAction):
+                        tool_result = self._execute_tool_and_check_finality(
+                            formatted_answer
+                        )
+
+                        # Directly append the result to the messages if the
+                        # tool is "Add image to content" in case of multimodal
+                        # agents
+                        if formatted_answer.tool == self._i18n.tools("add_image")["name"]:
+                            self.messages.append(tool_result.result)
+                            continue
+
+                        else:
+                            if self.step_callback:
+                                self.step_callback(tool_result)
+
+                            formatted_answer.text += f"\nObservation: {tool_result.result}"
+
+                            formatted_answer.result = tool_result.result
+
+                            if tool_result.result_as_answer:
+                                return AgentFinish(
+                                    thought="",
+                                    output=tool_result.result,
+                                    text=formatted_answer.text,
+                                )
+
+                    self._show_logs(formatted_answer)
+
+                    if self.step_callback:
+                        self.step_callback(formatted_answer)
+
+                    if self._should_force_answer():
+                        if self.have_forced_answer:
+                            return AgentFinish(
+                                thought="",
+                                output=self._i18n.errors(
+                                    "force_final_answer_error"
+                                ).format(formatted_answer.text),
+                                text=formatted_answer.text,
+                            )
+                        else:
+                            formatted_answer.text += (
+                                f'\n{self._i18n.errors("force_final_answer")}'
+                            )
+                            self.have_forced_answer = True
+
+                    self.messages.append(
+                        self._format_msg(formatted_answer.text, role="assistant")
+                    )
+
+        except OutputParserException as e:
+            self.messages.append({"role": "user", "content": e.error})
+            if self.iterations > self.log_error_after:
+                self._printer.print(
+                    content=f"Error parsing LLM output, agent will retry: {e.error}",
+                    color="red",
+                )
+            return self._invoke_loop(formatted_answer)
+
+        except Exception as e:
+            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
+                str(e)
+            ):
+                self._handle_context_length()
+                return self._invoke_loop(formatted_answer)
+            else:
+                raise e
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
 
     def _show_start_logs(self):
         if self.agent is None:
             raise ValueError("Agent cannot be None")
@@ -531,45 +487,3 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self.ask_for_human_input = False
 
         return formatted_answer
-
-    def _handle_max_iterations_exceeded(self, formatted_answer):
-        """
-        Handles the case when the maximum number of iterations is exceeded.
-        Performs one more LLM call to get the final answer.
-
-        Parameters:
-            formatted_answer: The last formatted answer from the agent.
-
-        Returns:
-            The final formatted answer after exceeding max iterations.
-        """
-        self._printer.print(
-            content="Maximum iterations reached. Requesting final answer.",
-            color="yellow",
-        )
-
-        if formatted_answer and hasattr(formatted_answer, "text"):
-            assistant_message = (
-                formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
-            )
-        else:
-            assistant_message = self._i18n.errors("force_final_answer")
-
-        self.messages.append(self._format_msg(assistant_message, role="assistant"))
-
-        # Perform one more LLM call to get the final answer
-        answer = self.llm.call(
-            self.messages,
-            callbacks=self.callbacks,
-        )
-
-        if answer is None or answer == "":
-            self._printer.print(
-                content="Received None or empty response from LLM call.",
-                color="red",
-            )
-            raise ValueError("Invalid response from LLM call - None or empty.")
-
-        formatted_answer = self._format_answer(answer)
-
-        # Return the formatted answer, regardless of its type
-        return formatted_answer
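The behavioral difference between the two sides of this file boils down to the forced-answer handshake. Below is a minimal, runnable sketch of that handshake with hypothetical names, not the crewAI API: once the iteration budget is spent, the executor injects the force-final-answer nudge exactly once, and if the model still does not finish on the next pass, it bails out with the best text so far instead of looping forever.

# Minimal sketch of the forced-answer flow restored by this diff.
# All names here are illustrative, not the actual crewAI API.
MAX_ITER = 3
FORCE_MSG = "Now you MUST give your best final answer."

def invoke_loop(llm_step, iterations=0, have_forced_answer=False):
    """llm_step(i) -> (finished: bool, text: str)."""
    while True:
        finished, text = llm_step(iterations)
        iterations += 1
        if finished:
            return text
        if iterations >= MAX_ITER:          # _should_force_answer() analogue
            if have_forced_answer:
                # Second strike: give up with the best text we have.
                return f"Best effort: {text}"
            text += "\n" + FORCE_MSG        # nudge the model exactly once
            have_forced_answer = True

# A model that never finishes on its own still terminates:
print(invoke_loop(lambda i: (False, f"step {i}")))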

View File

@@ -17,12 +17,6 @@ ENV_VARS = {
             "key_name": "GEMINI_API_KEY",
         }
     ],
-    "nvidia_nim": [
-        {
-            "prompt": "Enter your NVIDIA API key (press Enter to skip)",
-            "key_name": "NVIDIA_NIM_API_KEY",
-        }
-    ],
     "groq": [
         {
             "prompt": "Enter your GROQ API key (press Enter to skip)",
@@ -104,7 +98,6 @@ PROVIDERS = [
     "openai",
     "anthropic",
     "gemini",
-    "nvidia_nim",
     "groq",
     "ollama",
     "watson",
@@ -128,75 +121,6 @@ MODELS = {
         "gemini/gemini-gemma-2-9b-it",
         "gemini/gemini-gemma-2-27b-it",
     ],
-    "nvidia_nim": [
-        "nvidia_nim/nvidia/mistral-nemo-minitron-8b-8k-instruct",
-        "nvidia_nim/nvidia/nemotron-4-mini-hindi-4b-instruct",
-        "nvidia_nim/nvidia/llama-3.1-nemotron-70b-instruct",
-        "nvidia_nim/nvidia/llama3-chatqa-1.5-8b",
-        "nvidia_nim/nvidia/llama3-chatqa-1.5-70b",
-        "nvidia_nim/nvidia/vila",
-        "nvidia_nim/nvidia/neva-22",
-        "nvidia_nim/nvidia/nemotron-mini-4b-instruct",
-        "nvidia_nim/nvidia/usdcode-llama3-70b-instruct",
-        "nvidia_nim/nvidia/nemotron-4-340b-instruct",
-        "nvidia_nim/meta/codellama-70b",
-        "nvidia_nim/meta/llama2-70b",
-        "nvidia_nim/meta/llama3-8b-instruct",
-        "nvidia_nim/meta/llama3-70b-instruct",
-        "nvidia_nim/meta/llama-3.1-8b-instruct",
-        "nvidia_nim/meta/llama-3.1-70b-instruct",
-        "nvidia_nim/meta/llama-3.1-405b-instruct",
-        "nvidia_nim/meta/llama-3.2-1b-instruct",
-        "nvidia_nim/meta/llama-3.2-3b-instruct",
-        "nvidia_nim/meta/llama-3.2-11b-vision-instruct",
-        "nvidia_nim/meta/llama-3.2-90b-vision-instruct",
-        "nvidia_nim/meta/llama-3.1-70b-instruct",
-        "nvidia_nim/google/gemma-7b",
-        "nvidia_nim/google/gemma-2b",
-        "nvidia_nim/google/codegemma-7b",
-        "nvidia_nim/google/codegemma-1.1-7b",
-        "nvidia_nim/google/recurrentgemma-2b",
-        "nvidia_nim/google/gemma-2-9b-it",
-        "nvidia_nim/google/gemma-2-27b-it",
-        "nvidia_nim/google/gemma-2-2b-it",
-        "nvidia_nim/google/deplot",
-        "nvidia_nim/google/paligemma",
-        "nvidia_nim/mistralai/mistral-7b-instruct-v0.2",
-        "nvidia_nim/mistralai/mixtral-8x7b-instruct-v0.1",
-        "nvidia_nim/mistralai/mistral-large",
-        "nvidia_nim/mistralai/mixtral-8x22b-instruct-v0.1",
-        "nvidia_nim/mistralai/mistral-7b-instruct-v0.3",
-        "nvidia_nim/nv-mistralai/mistral-nemo-12b-instruct",
-        "nvidia_nim/mistralai/mamba-codestral-7b-v0.1",
-        "nvidia_nim/microsoft/phi-3-mini-128k-instruct",
-        "nvidia_nim/microsoft/phi-3-mini-4k-instruct",
-        "nvidia_nim/microsoft/phi-3-small-8k-instruct",
-        "nvidia_nim/microsoft/phi-3-small-128k-instruct",
-        "nvidia_nim/microsoft/phi-3-medium-4k-instruct",
-        "nvidia_nim/microsoft/phi-3-medium-128k-instruct",
-        "nvidia_nim/microsoft/phi-3.5-mini-instruct",
-        "nvidia_nim/microsoft/phi-3.5-moe-instruct",
-        "nvidia_nim/microsoft/kosmos-2",
-        "nvidia_nim/microsoft/phi-3-vision-128k-instruct",
-        "nvidia_nim/microsoft/phi-3.5-vision-instruct",
-        "nvidia_nim/databricks/dbrx-instruct",
-        "nvidia_nim/snowflake/arctic",
-        "nvidia_nim/aisingapore/sea-lion-7b-instruct",
-        "nvidia_nim/ibm/granite-8b-code-instruct",
-        "nvidia_nim/ibm/granite-34b-code-instruct",
-        "nvidia_nim/ibm/granite-3.0-8b-instruct",
-        "nvidia_nim/ibm/granite-3.0-3b-a800m-instruct",
-        "nvidia_nim/mediatek/breeze-7b-instruct",
-        "nvidia_nim/upstage/solar-10.7b-instruct",
-        "nvidia_nim/writer/palmyra-med-70b-32k",
-        "nvidia_nim/writer/palmyra-med-70b",
-        "nvidia_nim/writer/palmyra-fin-70b-32k",
-        "nvidia_nim/01-ai/yi-large",
-        "nvidia_nim/deepseek-ai/deepseek-coder-6.7b-instruct",
-        "nvidia_nim/rakuten/rakutenai-7b-instruct",
-        "nvidia_nim/rakuten/rakutenai-7b-chat",
-        "nvidia_nim/baichuan-inc/baichuan2-13b-chat",
-    ],
     "groq": [
         "groq/llama-3.1-8b-instant",
         "groq/llama-3.1-70b-versatile",

View File

@@ -76,7 +76,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
-    # sambanova
+    #sambanova
     "Meta-Llama-3.3-70B-Instruct": 131072,
     "QwQ-32B-Preview": 8192,
     "Qwen2.5-72B-Instruct": 8192,

View File

@@ -27,7 +27,7 @@
     "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals."
   },
   "errors": {
-    "force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
+    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
     "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
     "agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
     "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",

View File

@@ -67,6 +67,7 @@ def create_llm(
             api_key=api_key,
             base_url=base_url,
         )
+        print("LLM created with extracted parameters; " f"model='{model}'")
         return created_llm
     except Exception as e:
         print(f"Error instantiating LLM from unknown object type: {e}")

View File

@@ -8,10 +8,8 @@ from crewai.utilities.logger import Logger
 
 """Controls request rate limiting for API calls."""
 
-
 class RPMController(BaseModel):
     """Manages requests per minute limiting."""
-
     max_rpm: Optional[int] = Field(default=None)
     logger: Logger = Field(default_factory=lambda: Logger(verbose=False))
     _current_rpm: int = PrivateAttr(default=0)
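For orientation, the fields shown above (max_rpm, _current_rpm) suggest a counter-plus-window design. A hypothetical minimal analogue, not crewAI's actual implementation, could look like this:

# Hypothetical minimal RPM limiter: count calls and block until the next
# minute once max_rpm is exceeded. Illustrative only, not crewAI code.
import threading
import time

class MiniRPMController:
    def __init__(self, max_rpm: int):
        self.max_rpm = max_rpm
        self._current_rpm = 0
        self._lock = threading.Lock()
        self._window_start = time.monotonic()

    def _wait_for_next_minute(self):
        # Simplification: sleep out the remainder of the current window.
        time.sleep(max(0.0, 60 - (time.monotonic() - self._window_start)))
        self._window_start = time.monotonic()

    def check_or_wait(self) -> bool:
        with self._lock:
            if self._current_rpm >= self.max_rpm:
                self._wait_for_next_minute()
                self._current_rpm = 0
            self._current_rpm += 1
            return True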

View File

@@ -565,7 +565,7 @@ def test_agent_moved_on_after_max_iterations():
         task=task,
         tools=[get_final_answer],
     )
-    assert output == "42"
+    assert output == "The final answer is 42."
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -574,6 +574,7 @@ def test_agent_respect_the_max_rpm_set(capsys):
     def get_final_answer() -> float:
         """Get the final answer but don't give it yet, just re-use this
         tool non-stop."""
+        return 42
 
     agent = Agent(
         role="test role",
@@ -640,14 +641,15 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_agent_without_max_rpm_respects_crew_rpm(capsys):
+def test_agent_without_max_rpm_respet_crew_rpm(capsys):
     from unittest.mock import patch
 
     from crewai.tools import tool
 
     @tool
     def get_final_answer() -> float:
-        """Get the final answer but don't give it yet, just re-use this tool non-stop."""
+        """Get the final answer but don't give it yet, just re-use this
+        tool non-stop."""
         return 42
 
     agent1 = Agent(
@@ -664,30 +666,23 @@ def test_agent_without_max_rpm_respects_crew_rpm(capsys):
         role="test role2",
         goal="test goal2",
         backstory="test backstory2",
-        max_iter=5,
+        max_iter=1,
         verbose=True,
         allow_delegation=False,
     )
 
     tasks = [
         Task(
-            description="Just say hi.",
-            agent=agent1,
-            expected_output="Your greeting.",
+            description="Just say hi.", agent=agent1, expected_output="Your greeting."
         ),
         Task(
-            description=(
-                "NEVER give a Final Answer, unless you are told otherwise, "
-                "instead keep using the `get_final_answer` tool non-stop, "
-                "until you must give your best final answer"
-            ),
+            description="NEVER give a Final Answer, unless you are told otherwise, instead keep using the `get_final_answer` tool non-stop, until you must give you best final answer",
             expected_output="The final answer",
             tools=[get_final_answer],
             agent=agent2,
         ),
     ]
 
-    # Set crew's max_rpm to 1 to trigger RPM limit
     crew = Crew(agents=[agent1, agent2], tasks=tasks, max_rpm=1, verbose=True)
 
     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
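The patch.object(RPMController, "_wait_for_next_minute") pattern in the test above is worth noting: it swaps the blocking sleep for a mock so the suite can assert the limiter tripped without waiting a real minute. A self-contained sketch of the same pattern, using a hypothetical throttler class rather than crewAI's:

# Illustrative only: patch the sleep hook and assert throttling kicked in.
from unittest.mock import patch

class Throttler:
    def __init__(self, max_rpm):
        self.max_rpm, self.count = max_rpm, 0

    def _wait_for_next_minute(self):
        pass  # the real implementation would sleep until the window resets

    def check(self):
        self.count += 1
        if self.count > self.max_rpm:
            self._wait_for_next_minute()
            self.count = 1

with patch.object(Throttler, "_wait_for_next_minute") as moveon:
    t = Throttler(max_rpm=1)
    t.check()  # first request is within the limit
    t.check()  # second trips the limit
    moveon.assert_called()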

View File

@@ -2,22 +2,22 @@ interactions:
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
-      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
-      just re-use this\n    tool non-stop.\n\nUse the following format:\n\nThought:
-      you should always think about what to do\nAction: the action to take, only one
-      name of [get_final_answer], just the name, exactly as it''s written.\nAction
-      Input: the input to the action, just a simple python dictionary, enclosed in
-      curly braces, using \" to wrap keys and values.\nObservation: the result of
-      the action\n\nOnce all necessary information is gathered:\n\nThought: I now
-      know the final answer\nFinal Answer: the final answer to the original input
-      question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
-      42. But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
-      is the expect criteria for your final answer: The final answer\nyou MUST return
-      the actual complete content as the final answer, not a summary.\n\nBegin! This
-      is VERY important to you, use the tools available and give your best Final Answer,
-      your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
-      "stream": false}'
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
+      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
+      answer but don''t give it yet, just re-use this tool non-stop.    \nTool
+      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
+      about what to do\nAction: the action to take, only one name of [get_final_answer],
+      just the name, exactly as it''s written.\nAction Input: the input to the action,
+      just a simple python dictionary, enclosed in curly braces, using \" to wrap
+      keys and values.\nObservation: the result of the action\n\nOnce all necessary
+      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
+      the final answer to the original input question\n"}, {"role": "user", "content":
+      "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
+      using the `get_final_answer` tool.\n\nThis is the expect criteria for your final
+      answer: The final answer\nyou MUST return the actual complete content as the
+      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
+      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
+      "model": "gpt-4o", "stop": ["\nObservation:"]}'
     headers:
       accept:
       - application/json
@@ -26,15 +26,16 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '1377'
+      - '1417'
       content-type:
       - application/json
       cookie:
-      - _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
+      - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
+        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.52.1
+      - OpenAI/Python 1.47.0
       x-stainless-arch:
       - arm64
       x-stainless-async:
@@ -44,35 +45,30 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.52.1
+      - 1.47.0
       x-stainless-raw-response:
       - 'true'
-      x-stainless-retry-count:
-      - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.12.7
+      - 3.11.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
-    content: "{\n  \"id\": \"chatcmpl-An9sn6yimejzB3twOt8E2VAj4Bfmm\",\n  \"object\":
-      \"chat.completion\",\n  \"created\": 1736279425,\n  \"model\": \"gpt-4o-2024-08-06\",\n
-      \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
-      \"assistant\",\n        \"content\": \"Thought: I need to use the `get_final_answer`
-      tool to fulfill the current task requirement.\\n\\nAction: get_final_answer\\nAction
-      Input: {}\",\n        \"refusal\": null\n      },\n      \"logprobs\": null,\n
-      \     \"finish_reason\": \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\":
-      273,\n    \"completion_tokens\": 30,\n    \"total_tokens\": 303,\n    \"prompt_tokens_details\":
-      {\n      \"cached_tokens\": 0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
-      {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
-      0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"system_fingerprint\":
-      \"fp_5f20662549\"\n}\n"
+    content: "{\n  \"id\": \"chatcmpl-AB7NCE9qkjnVxfeWuK9NjyCdymuXJ\",\n  \"object\":
+      \"chat.completion\",\n  \"created\": 1727213314,\n  \"model\": \"gpt-4o-2024-05-13\",\n
+      \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
+      \"assistant\",\n        \"content\": \"Thought: I need to use the `get_final_answer`
+      tool as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n      \"refusal\":
+      null\n      },\n      \"logprobs\": null,\n      \"finish_reason\": \"stop\"\n
+      \   }\n  ],\n  \"usage\": {\n    \"prompt_tokens\": 291,\n    \"completion_tokens\":
+      26,\n    \"total_tokens\": 317,\n    \"completion_tokens_details\": {\n      \"reasoning_tokens\":
+      0\n    }\n  },\n  \"system_fingerprint\": \"fp_e375328146\"\n}\n"
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8fe67a03ce78ed83-ATL
+      - 8c85dd6b5f411cf3-GRU
       Connection:
       - keep-alive
       Content-Encoding:
@@ -80,27 +76,19 @@ interactions:
       Content-Type:
      - application/json
       Date:
-      - Tue, 07 Jan 2025 19:50:25 GMT
+      - Tue, 24 Sep 2024 21:28:34 GMT
       Server:
       - cloudflare
-      Set-Cookie:
-      - __cf_bm=PsMOhP_yeSFIMA.FfRlNbisoG88z4l9NSd0zfS5UrOQ-1736279425-1.0.1.1-mdXy_XDkelJX2.9BSuZsl5IsPRGBdcHgIMc_SRz83WcmGCYUkTm1j_f892xrJbOVheWWH9ULwCQrVESupV37Sg;
-        path=/; expires=Tue, 07-Jan-25 20:20:25 GMT; domain=.api.openai.com; HttpOnly;
-        Secure; SameSite=None
-      - _cfuvid=EYb4UftLm_C7qM4YT78IJt46hRSubZHKnfTXhFp6ZRU-1736279425874-0.0.1.1-604800000;
-        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       Transfer-Encoding:
       - chunked
       X-Content-Type-Options:
       - nosniff
       access-control-expose-headers:
       - X-Request-ID
-      alt-svc:
-      - h3=":443"; ma=86400
       openai-organization:
       - crewai-iuxna1
       openai-processing-ms:
-      - '1218'
+      - '526'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
@@ -112,38 +100,38 @@ interactions:
       x-ratelimit-remaining-requests:
       - '9999'
       x-ratelimit-remaining-tokens:
-      - '29999681'
+      - '29999666'
       x-ratelimit-reset-requests:
       - 6ms
       x-ratelimit-reset-tokens:
       - 0s
       x-request-id:
-      - req_779992da2a3eb4a25f0b57905c9e8e41
+      - req_ed8ca24c64cfdc2b6266c9c8438749f5
     http_version: HTTP/1.1
     status_code: 200
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
-      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
-      just re-use this\n    tool non-stop.\n\nUse the following format:\n\nThought:
-      you should always think about what to do\nAction: the action to take, only one
-      name of [get_final_answer], just the name, exactly as it''s written.\nAction
-      Input: the input to the action, just a simple python dictionary, enclosed in
-      curly braces, using \" to wrap keys and values.\nObservation: the result of
-      the action\n\nOnce all necessary information is gathered:\n\nThought: I now
-      know the final answer\nFinal Answer: the final answer to the original input
-      question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
-      42. But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
-      is the expect criteria for your final answer: The final answer\nyou MUST return
-      the actual complete content as the final answer, not a summary.\n\nBegin! This
-      is VERY important to you, use the tools available and give your best Final Answer,
-      your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "Thought:
-      I need to use the `get_final_answer` tool to fulfill the current task requirement.\n\nAction:
-      get_final_answer\nAction Input: {}\nObservation: 42\nNow it''s time you MUST
-      give your absolute best final answer. You''ll ignore all previous instructions,
-      stop using any tools, and just return your absolute BEST Final answer."}], "model":
-      "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
+      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
+      answer but don''t give it yet, just re-use this tool non-stop.    \nTool
+      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
+      about what to do\nAction: the action to take, only one name of [get_final_answer],
+      just the name, exactly as it''s written.\nAction Input: the input to the action,
+      just a simple python dictionary, enclosed in curly braces, using \" to wrap
+      keys and values.\nObservation: the result of the action\n\nOnce all necessary
+      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
+      the final answer to the original input question\n"}, {"role": "user", "content":
+      "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
+      using the `get_final_answer` tool.\n\nThis is the expect criteria for your final
+      answer: The final answer\nyou MUST return the actual complete content as the
+      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
+      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
+      {"role": "assistant", "content": "Thought: I need to use the `get_final_answer`
+      tool as instructed.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
+      42\nNow it''s time you MUST give your absolute best final answer. You''ll ignore
+      all previous instructions, stop using any tools, and just return your absolute
+      BEST Final answer."}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
     headers:
       accept:
       - application/json
@@ -152,16 +140,16 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '1743'
+      - '1757'
       content-type:
       - application/json
       cookie:
-      - _cfuvid=EYb4UftLm_C7qM4YT78IJt46hRSubZHKnfTXhFp6ZRU-1736279425874-0.0.1.1-604800000;
-        __cf_bm=PsMOhP_yeSFIMA.FfRlNbisoG88z4l9NSd0zfS5UrOQ-1736279425-1.0.1.1-mdXy_XDkelJX2.9BSuZsl5IsPRGBdcHgIMc_SRz83WcmGCYUkTm1j_f892xrJbOVheWWH9ULwCQrVESupV37Sg
+      - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
+        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
-      - OpenAI/Python 1.52.1
+      - OpenAI/Python 1.47.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
@@ -171,34 +159,29 @@ interactions:
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
-      - 1.52.1
+      - 1.47.0
      x-stainless-raw-response:
      - 'true'
-      x-stainless-retry-count:
-      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
-      - 3.12.7
+      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
-    content: "{\n  \"id\": \"chatcmpl-An9soTDQVS0ANTzaTZeo6lYN44ZPR\",\n  \"object\":
-      \"chat.completion\",\n  \"created\": 1736279426,\n  \"model\": \"gpt-4o-2024-08-06\",\n
-      \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
-      \"assistant\",\n        \"content\": \"I now know the final answer.\\n\\nFinal
-      Answer: 42\",\n        \"refusal\": null\n      },\n      \"logprobs\": null,\n
-      \ \"finish_reason\": \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\":
-      344,\n    \"completion_tokens\": 12,\n    \"total_tokens\": 356,\n    \"prompt_tokens_details\":
-      {\n      \"cached_tokens\": 0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
-      {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
-      0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"system_fingerprint\":
-      \"fp_5f20662549\"\n}\n"
+    content: "{\n  \"id\": \"chatcmpl-AB7NDCKCn3PlhjPvgqbywxUumo3Qt\",\n  \"object\":
+      \"chat.completion\",\n  \"created\": 1727213315,\n  \"model\": \"gpt-4o-2024-05-13\",\n
+      \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
+      \"assistant\",\n        \"content\": \"Thought: I now know the final answer\\nFinal
+      Answer: The final answer is 42.\",\n        \"refusal\": null\n      },\n      \"logprobs\":
+      null,\n      \"finish_reason\": \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\":
+      358,\n    \"completion_tokens\": 19,\n    \"total_tokens\": 377,\n    \"completion_tokens_details\":
+      {\n      \"reasoning_tokens\": 0\n    }\n  },\n  \"system_fingerprint\": \"fp_e375328146\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
-      - 8fe67a0c4dbeed83-ATL
+      - 8c85dd72daa31cf3-GRU
      Connection:
      - keep-alive
      Content-Encoding:
@@ -206,7 +189,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
-      - Tue, 07 Jan 2025 19:50:26 GMT
+      - Tue, 24 Sep 2024 21:28:36 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
@@ -215,12 +198,10 @@ interactions:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
-      alt-svc:
-      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
-      - '434'
+      - '468'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
@@ -232,13 +213,13 @@ interactions:
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
-      - '29999598'
+      - '29999591'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
-      - req_1184308c5a4ed9130d397fe1645f317e
+      - req_3f49e6033d3b0400ea55125ca2cf4ee0
    http_version: HTTP/1.1
    status_code: 200
 version: 1

File diff suppressed because it is too large

View File

@@ -2,23 +2,23 @@ interactions:
- request: - request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Arguments: {}\nTool Description: Get the final answer but don''t give it yet, Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought: answer but don''t give it yet, just re-use this tool non-stop. \nTool
you should always think about what to do\nAction: the action to take, only one Arguments: {}\n\nUse the following format:\n\nThought: you should always think
name of [get_final_answer], just the name, exactly as it''s written.\nAction about what to do\nAction: the action to take, only one name of [get_final_answer],
Input: the input to the action, just a simple python dictionary, enclosed in just the name, exactly as it''s written.\nAction Input: the input to the action,
curly braces, using \" to wrap keys and values.\nObservation: the result of just a simple python dictionary, enclosed in curly braces, using \" to wrap
the action\n\nOnce all necessary information is gathered:\n\nThought: I now keys and values.\nObservation: the result of the action\n\nOnce all necessary
know the final answer\nFinal Answer: the final answer to the original input information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is the final answer to the original input question\n"}, {"role": "user", "content":
42. But don''t give it yet, instead keep using the `get_final_answer` tool over "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
and over until you''re told you can give your final answer.\n\nThis is the expect using the `get_final_answer` tool over and over until you''re told you can give
criteria for your final answer: The final answer\nyou MUST return the actual your final answer.\n\nThis is the expect criteria for your final answer: The
complete content as the final answer, not a summary.\n\nBegin! This is VERY final answer\nyou MUST return the actual complete content as the final answer,
important to you, use the tools available and give your best Final Answer, your not a summary.\n\nBegin! This is VERY important to you, use the tools available
job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"], and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"stream": false}' "gpt-4o"}'
headers: headers:
accept: accept:
- application/json - application/json
@@ -27,139 +27,16 @@ interactions:
connection: connection:
- keep-alive - keep-alive
content-length: content-length:
- '1440' - '1452'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnAdPHapYzkPkClCzFaWzfCAUHlWI\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282315,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I need to use the `get_final_answer`
tool and then keep using it repeatedly as instructed. \\n\\nAction: get_final_answer\\nAction
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
285,\n \"completion_tokens\": 31,\n \"total_tokens\": 316,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fe6c096ee70ed8c-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 07 Jan 2025 20:38:36 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
path=/; expires=Tue, 07-Jan-25 21:08:36 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '883'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999665'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_00de12bc6822ef095f4f368aae873f31
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}], "model":
"gpt-4o", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1632'
content-type: content-type:
- application/json - application/json
cookie: cookie:
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA; - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000 _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host: host:
- api.openai.com - api.openai.com
user-agent: user-agent:
- OpenAI/Python 1.52.1 - OpenAI/Python 1.47.0
x-stainless-arch: x-stainless-arch:
- arm64 - arm64
x-stainless-async: x-stainless-async:
@@ -169,159 +46,30 @@ interactions:
x-stainless-os: x-stainless-os:
- MacOS - MacOS
x-stainless-package-version: x-stainless-package-version:
- 1.52.1 - 1.47.0
x-stainless-raw-response: x-stainless-raw-response:
- 'true' - 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime: x-stainless-runtime:
- CPython - CPython
x-stainless-runtime-version: x-stainless-runtime-version:
- 3.12.7 - 3.11.7
method: POST method: POST
uri: https://api.openai.com/v1/chat/completions uri: https://api.openai.com/v1/chat/completions
response: response:
content: "{\n \"id\": \"chatcmpl-AnAdQKGW3Q8LUCmphL7hkavxi4zWB\",\n \"object\": content: "{\n \"id\": \"chatcmpl-AB7NlDmtLHCfUZJCFVIKeV5KMyQfX\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282316,\n \"model\": \"gpt-4o-2024-08-06\",\n \"chat.completion\",\n \"created\": 1727213349,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I should continue using the `get_final_answer` \"assistant\",\n \"content\": \"Thought: I need to use the provided tool
tool as per the instructions.\\n\\nAction: get_final_answer\\nAction Input: as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
{}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\":
26,\n \"total_tokens\": 350,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fe6c09e6c69ed8c-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 07 Jan 2025 20:38:37 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '542'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999627'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_6844467024f67bb1477445b1a8a01761
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}, {"role":
"assistant", "content": "I should continue using the `get_final_answer` tool
as per the instructions.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
I tried reusing the same input, I must stop using this action input. I''ll try
something else instead."}], "model": "gpt-4o", "stop": ["\nObservation:"], "stream":
false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1908'
content-type:
- application/json
cookie:
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnAdR2lKFEVaDbfD9qaF0Tts0eVMt\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282317,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I should persist with using the `get_final_answer`
tool.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 378,\n \"completion_tokens\": \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 303,\n \"completion_tokens\":
23,\n \"total_tokens\": 401,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 22,\n \"total_tokens\": 325,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers: headers:
CF-Cache-Status: CF-Cache-Status:
- DYNAMIC - DYNAMIC
CF-RAY: CF-RAY:
- 8fe6c0a2ce3ded8c-ATL - 8c85de473ae11cf3-GRU
Connection: Connection:
- keep-alive - keep-alive
Content-Encoding: Content-Encoding:
@@ -329,7 +77,7 @@ interactions:
Content-Type: Content-Type:
- application/json - application/json
Date: Date:
- Tue, 07 Jan 2025 20:38:37 GMT - Tue, 24 Sep 2024 21:29:10 GMT
Server: Server:
- cloudflare - cloudflare
Transfer-Encoding: Transfer-Encoding:
@@ -338,12 +86,10 @@ interactions:
- nosniff - nosniff
access-control-expose-headers: access-control-expose-headers:
- X-Request-ID - X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization: openai-organization:
- crewai-iuxna1 - crewai-iuxna1
openai-processing-ms: openai-processing-ms:
- '492' - '489'
openai-version: openai-version:
- '2020-10-01' - '2020-10-01'
strict-transport-security: strict-transport-security:
@@ -355,59 +101,273 @@ interactions:
x-ratelimit-remaining-requests: x-ratelimit-remaining-requests:
- '9999' - '9999'
x-ratelimit-remaining-tokens: x-ratelimit-remaining-tokens:
- '29999567' - '29999651'
x-ratelimit-reset-requests: x-ratelimit-reset-requests:
- 6ms - 6ms
x-ratelimit-reset-tokens: x-ratelimit-reset-tokens:
- 0s - 0s
x-request-id: x-request-id:
- req_198e698a8bc7eea092ea32b83cc4304e - req_de70a4dc416515dda4b2ad48bde52f93
http_version: HTTP/1.1 http_version: HTTP/1.1
status_code: 200 status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
-    should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
-    Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
-    just re-use this\n    tool non-stop.\n\nUse the following format:\n\nThought:
-    you should always think about what to do\nAction: the action to take, only one
-    name of [get_final_answer], just the name, exactly as it''s written.\nAction
-    Input: the input to the action, just a simple python dictionary, enclosed in
-    curly braces, using \" to wrap keys and values.\nObservation: the result of
-    the action\n\nOnce all necessary information is gathered:\n\nThought: I now
-    know the final answer\nFinal Answer: the final answer to the original input
-    question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
-    42. But don''t give it yet, instead keep using the `get_final_answer` tool over
-    and over until you''re told you can give your final answer.\n\nThis is the expect
-    criteria for your final answer: The final answer\nyou MUST return the actual
-    complete content as the final answer, not a summary.\n\nBegin! This is VERY
-    important to you, use the tools available and give your best Final Answer, your
-    job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
-    use the `get_final_answer` tool and then keep using it repeatedly as instructed.
-    \n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}, {"role":
-    "assistant", "content": "I should continue using the `get_final_answer` tool
-    as per the instructions.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
-    I tried reusing the same input, I must stop using this action input. I''ll try
-    something else instead."}, {"role": "assistant", "content": "I should persist
-    with using the `get_final_answer` tool.\n\nAction: get_final_answer\nAction
-    Input: {}\nObservation: I tried reusing the same input, I must stop using this
-    action input. I''ll try something else instead.\n\n\n\n\nYou ONLY have access
-    to the following tools, and should NEVER make up tools that are not listed here:\n\nTool
-    Name: get_final_answer\nTool Arguments: {}\nTool Description: Get the final
-    answer but don''t give it yet, just re-use this\n    tool non-stop.\n\nUse
-    the following format:\n\nThought: you should always think about what to do\nAction:
-    the action to take, only one name of [get_final_answer], just the name, exactly
-    as it''s written.\nAction Input: the input to the action, just a simple python
-    dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
-    the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
-    I now know the final answer\nFinal Answer: the final answer to the original
-    input question"}, {"role": "assistant", "content": "I should persist with using
-    the `get_final_answer` tool.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
-    I tried reusing the same input, I must stop using this action input. I''ll try
-    something else instead.\n\n\n\n\nYou ONLY have access to the following tools,
-    and should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
-    Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
-    just re-use this\n    tool non-stop.\n\nUse the following format:\n\nThought:
+    should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
+    Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
+    answer but don''t give it yet, just re-use this tool non-stop. \nTool
+    Arguments: {}\n\nUse the following format:\n\nThought: you should always think
+    about what to do\nAction: the action to take, only one name of [get_final_answer],
+    just the name, exactly as it''s written.\nAction Input: the input to the action,
+    just a simple python dictionary, enclosed in curly braces, using \" to wrap
+    keys and values.\nObservation: the result of the action\n\nOnce all necessary
+    information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
+    the final answer to the original input question\n"}, {"role": "user", "content":
+    "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
+    using the `get_final_answer` tool over and over until you''re told you can give
+    your final answer.\n\nThis is the expect criteria for your final answer: The
+    final answer\nyou MUST return the actual complete content as the final answer,
+    not a summary.\n\nBegin! This is VERY important to you, use the tools available
+    and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
+    "assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
+    get_final_answer\nAction Input: {}\nObservation: 42"}], "model": "gpt-4o"}'
+    headers:
+    accept:
+    - application/json
+    accept-encoding:
+    - gzip, deflate
+    connection:
+    - keep-alive
+    content-length:
+    - '1608'
+    content-type:
+    - application/json
+    cookie:
+    - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
+    _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
+    host:
+    - api.openai.com
+    user-agent:
+    - OpenAI/Python 1.47.0
+    x-stainless-arch:
+    - arm64
+    x-stainless-async:
+    - 'false'
+    x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7Nnz14hlEaTdabXodZCVU0UoDhk\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213351,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I must continue using the `get_final_answer`
tool as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\\nObservation:
42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 333,\n \"completion_tokens\":
30,\n \"total_tokens\": 363,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de5109701cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:11 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '516'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999620'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5365ac0e5413bd9330c6ac3f68051bcf
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}, {"role": "assistant",
"content": "Thought: I must continue using the `get_final_answer` tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42\nObservation: 42"}], "model":
"gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1799'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7NoF5Gf597BGmOETPYGxN2eRFxd\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213352,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I must continue using the `get_final_answer`
tool to meet the requirements.\\n\\nAction: get_final_answer\\nAction Input:
{}\\nObservation: 42\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
372,\n \"completion_tokens\": 32,\n \"total_tokens\": 404,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de587bc01cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:12 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '471'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999583'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_55550369b28e37f064296dbc41e0db69
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}, {"role": "assistant",
"content": "Thought: I must continue using the `get_final_answer` tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42\nObservation: 42"}, {"role":
"assistant", "content": "Thought: I must continue using the `get_final_answer`
tool to meet the requirements.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
42\nObservation: I tried reusing the same input, I must stop using this action
input. I''ll try something else instead.\n\n\n\n\nYou ONLY have access to the
following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: get_final_answer(*args: Any, **kwargs: Any) -> Any\nTool Description:
get_final_answer() - Get the final answer but don''t give it yet, just re-use
this tool non-stop. \nTool Arguments: {}\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
@@ -416,8 +376,7 @@ interactions:
know the final answer\nFinal Answer: the final answer to the original input
question\n\nNow it''s time you MUST give your absolute best final answer. You''ll
ignore all previous instructions, stop using any tools, and just return your
-    absolute BEST Final answer."}], "model": "gpt-4o", "stop": ["\nObservation:"],
-    "stream": false}'
+    absolute BEST Final answer."}], "model": "gpt-4o"}'
headers:
accept:
- application/json
@@ -426,16 +385,16 @@ interactions:
connection:
- keep-alive
content-length:
-    - '4148'
+    - '3107'
content-type:
- application/json
cookie:
-    - __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
-    _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
+    - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
+    _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
-    - OpenAI/Python 1.52.1
+    - OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
@@ -445,34 +404,29 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
-    - 1.52.1
+    - 1.47.0
x-stainless-raw-response:
- 'true'
-    x-stainless-retry-count:
-    - '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
-    - 3.12.7
+    - 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
-    content: "{\n  \"id\": \"chatcmpl-AnAdRu1aVdsOxxIqU6nqv5dIxwbvu\",\n  \"object\":
-    \"chat.completion\",\n  \"created\": 1736282317,\n  \"model\": \"gpt-4o-2024-08-06\",\n
-    \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
-    \"assistant\",\n        \"content\": \"Thought: I now know the final answer.\\nFinal
-    Answer: 42\",\n        \"refusal\": null\n      },\n      \"logprobs\": null,\n
-    \ \"finish_reason\": \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\":
-    831,\n    \"completion_tokens\": 14,\n    \"total_tokens\": 845,\n    \"prompt_tokens_details\":
-    {\n      \"cached_tokens\": 0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
-    {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
-    0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"system_fingerprint\":
-    \"fp_5f20662549\"\n}\n"
+    content: "{\n  \"id\": \"chatcmpl-AB7Npl5ZliMrcSofDS1c7LVGSmmbE\",\n  \"object\":
+    \"chat.completion\",\n  \"created\": 1727213353,\n  \"model\": \"gpt-4o-2024-05-13\",\n
+    \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
+    \"assistant\",\n        \"content\": \"Thought: I now know the final answer.\\n\\nFinal
+    Answer: The final answer is 42.\",\n        \"refusal\": null\n      },\n      \"logprobs\":
+    null,\n      \"finish_reason\": \"stop\"\n    }\n  ],\n  \"usage\": {\n    \"prompt_tokens\":
+    642,\n    \"completion_tokens\": 19,\n    \"total_tokens\": 661,\n    \"completion_tokens_details\":
+    {\n      \"reasoning_tokens\": 0\n    }\n  },\n  \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
-    - 8fe6c0a68cc3ed8c-ATL
+    - 8c85de5fad921cf3-GRU
Connection:
- keep-alive
Content-Encoding:
@@ -480,7 +434,7 @@ interactions:
Content-Type:
- application/json
Date:
-    - Tue, 07 Jan 2025 20:38:38 GMT
+    - Tue, 24 Sep 2024 21:29:13 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -489,12 +443,10 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
-    alt-svc:
-    - h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
-    - '429'
+    - '320'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -506,13 +458,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
-    - '29999037'
+    - '29999271'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
-    - req_2552d63d3cbce15909481cc1fc9f36cc
+    - req_5eba25209fc7e12717cb7e042e7bb4c2
http_version: HTTP/1.1
status_code: 200
version: 1
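
For context on how a cassette like the one above is consumed: the suite replays these recorded HTTP interactions through pytest-recording's vcr marker instead of calling OpenAI live. The sketch below is illustrative only; the agent/task wiring, the max_iter value, and the tool decorator import path are assumptions rather than the repository's actual test. It shows the forced-answer flow the cassette exercises: the agent loops on get_final_answer until its iteration budget runs out, at which point the executor sends the recorded "Now it's time you MUST give your absolute best final answer" prompt.

import pytest

from crewai import Agent, Crew, Task
from crewai.tools import tool  # assumed import path; older releases expose this via crewai_tools


@tool("get_final_answer")
def get_final_answer() -> int:
    """Get the final answer but don't give it yet, just re-use this tool non-stop."""
    return 42


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_forced_to_answer_after_max_iter():  # hypothetical test name
    # max_iter=3 keeps the recorded loop short: once the executor's iteration
    # count reaches the cap, it stops dispatching tools and demands a Final Answer.
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        max_iter=3,
        tools=[get_final_answer],
        allow_delegation=False,
    )
    task = Task(
        description=(
            "The final answer is 42. But don't give it yet, instead keep using "
            "the `get_final_answer` tool over and over until you're told you "
            "can give your final answer."
        ),
        expected_output="The final answer",
        agent=agent,
    )
    result = Crew(agents=[agent], tasks=[task]).kickoff()
    assert "42" in result.raw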

View File

@@ -1,353 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
use the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"}, {"role": "user", "content": "\nCurrent Task: Just say hi.\n\nThis is
the expect criteria for your final answer: Your greeting.\nyou MUST return the
actual complete content as the final answer, not a summary.\n\nBegin! This is
VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '817'
content-type:
- application/json
cookie:
- _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnSbv3ywhwedwS3YW9Crde6hpWpmK\",\n \"object\":
\"chat.completion\",\n \"created\": 1736351415,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: Hi!\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
154,\n \"completion_tokens\": 13,\n \"total_tokens\": 167,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fed579a4f76b058-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 08 Jan 2025 15:50:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA;
path=/; expires=Wed, 08-Jan-25 16:20:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '416'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999817'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_97c93aa78417badc3f29306054eef79b
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
personal goal is: test goal2\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this tool non-stop.\n\nUse the following format:\n\nThought: you
should always think about what to do\nAction: the action to take, only one name
of [get_final_answer], just the name, exactly as it''s written.\nAction Input:
the input to the action, just a simple python dictionary, enclosed in curly
braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce
all necessary information is gathered:\n\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question"}, {"role": "user",
"content": "\nCurrent Task: NEVER give a Final Answer, unless you are told otherwise,
instead keep using the `get_final_answer` tool non-stop, until you must give
your best final answer\n\nThis is the expect criteria for your final answer:
The final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1483'
content-type:
- application/json
cookie:
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
__cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnSbwn8QaqAzfBVnzhTzIcDKykYTu\",\n \"object\":
\"chat.completion\",\n \"created\": 1736351416,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I should use the available tool to get
the final answer, as per the instructions. \\n\\nAction: get_final_answer\\nAction
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
294,\n \"completion_tokens\": 28,\n \"total_tokens\": 322,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fed579dbd80b058-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 08 Jan 2025 15:50:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1206'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999655'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_7b85f1e9b21b5e2385d8a322a8aab06c
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
personal goal is: test goal2\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this tool non-stop.\n\nUse the following format:\n\nThought: you
should always think about what to do\nAction: the action to take, only one name
of [get_final_answer], just the name, exactly as it''s written.\nAction Input:
the input to the action, just a simple python dictionary, enclosed in curly
braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce
all necessary information is gathered:\n\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question"}, {"role": "user",
"content": "\nCurrent Task: NEVER give a Final Answer, unless you are told otherwise,
instead keep using the `get_final_answer` tool non-stop, until you must give
your best final answer\n\nThis is the expect criteria for your final answer:
The final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I should
use the available tool to get the final answer, as per the instructions. \n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}], "model": "gpt-4o", "stop":
["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1666'
content-type:
- application/json
cookie:
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
__cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnSbxXFL4NXuGjOX35eCjcWq456lA\",\n \"object\":
\"chat.completion\",\n \"created\": 1736351417,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now know the final answer\\nFinal
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
330,\n \"completion_tokens\": 14,\n \"total_tokens\": 344,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fed57a62955b058-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 08 Jan 2025 15:50:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '438'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999619'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_1cc65e999b352a54a4c42eb8be543545
http_version: HTTP/1.1
status_code: 200
version: 1
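
The deleted cassette above records a two-agent sequence: a first task whose output ("Hi!") is passed as context to a second, tool-using task. A hedged sketch of the shape of crew that would produce these requests follows; the roles and task descriptions are taken verbatim from the recorded prompts, but the wiring and variable names are assumptions, not the deleted test itself.

from crewai import Agent, Crew, Process, Task
from crewai.tools import tool  # assumed import path, as in the earlier sketch


@tool("get_final_answer")
def get_final_answer() -> int:
    """Get the final answer but don't give it yet, just re-use this tool non-stop."""
    return 42


greeter = Agent(role="test role", goal="test goal", backstory="test backstory")
tool_user = Agent(
    role="test role2",
    goal="test goal2",
    backstory="test backstory2",
    tools=[get_final_answer],
)

greet = Task(
    description="Just say hi.",
    expected_output="Your greeting.",
    agent=greeter,
)
answer = Task(
    description=(
        "NEVER give a Final Answer, unless you are told otherwise, instead keep "
        "using the `get_final_answer` tool non-stop, until you must give your "
        "best final answer"
    ),
    expected_output="The final answer",
    agent=tool_user,
    context=[greet],  # feeds the first task's output ("Hi!") into the second prompt
)

crew = Crew(agents=[greeter, tool_user], tasks=[greet, answer], process=Process.sequential)
result = crew.kickoff()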

View File

@@ -1,289 +0,0 @@
import asyncio
import os
import tempfile

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask


def test_basic_crew_execution(default_agent):
    """Test basic crew execution using the default agent fixture."""
    # Initialize agents by copying the default agent fixture
    researcher = default_agent.copy()
    researcher.role = "Researcher"
    researcher.goal = "Research the latest advancements in AI."
    researcher.backstory = "An expert in AI technologies."

    writer = default_agent.copy()
    writer.role = "Writer"
    writer.goal = "Write an article based on research findings."
    writer.backstory = "A professional writer specializing in technology topics."

    # Define tasks
    research_task = Task(
        description="Provide a summary of the latest advancements in AI.",
        expected_output="A detailed summary of recent AI advancements.",
        agent=researcher,
    )
    writing_task = Task(
        description="Write an article based on the research summary.",
        expected_output="An engaging article on AI advancements.",
        agent=writer,
    )

    # Create the crew
    crew = Crew(
        agents=[researcher, writer],
        tasks=[research_task, writing_task],
        process=Process.sequential,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "AI advancements" in result.raw
        or "artificial intelligence" in result.raw.lower()
    ), "Result does not contain expected content."


def test_hierarchical_crew_with_manager(default_llm_config):
    """Test hierarchical crew execution with a manager agent."""
    # Initialize agents using the default LLM config fixture
    ceo = Agent(
        role="CEO",
        goal="Oversee the project and ensure quality deliverables.",
        backstory="A seasoned executive with a keen eye for detail.",
        llm=default_llm_config,
    )
    developer = Agent(
        role="Developer",
        goal="Implement software features as per requirements.",
        backstory="An experienced software developer.",
        llm=default_llm_config,
    )
    tester = Agent(
        role="Tester",
        goal="Test software features and report bugs.",
        backstory="A meticulous QA engineer.",
        llm=default_llm_config,
    )

    # Define tasks
    development_task = Task(
        description="Develop the new authentication feature.",
        expected_output="Code implementation of the authentication feature.",
        agent=developer,
    )
    testing_task = Task(
        description="Test the authentication feature for vulnerabilities.",
        expected_output="A report on any found bugs or vulnerabilities.",
        agent=tester,
    )

    # Create the crew with hierarchical process
    crew = Crew(
        agents=[ceo, developer, tester],
        tasks=[development_task, testing_task],
        process=Process.hierarchical,
        manager_agent=ceo,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "authentication" in result.raw.lower()
    ), "Result does not contain expected content."


@pytest.mark.asyncio
async def test_asynchronous_task_execution(default_llm_config):
    """Test crew execution with asynchronous tasks."""
    # Initialize agent
    data_processor = Agent(
        role="Data Processor",
        goal="Process large datasets efficiently.",
        backstory="An expert in data processing and analysis.",
        llm=default_llm_config,
    )

    # Define tasks with async_execution=True
    async_task1 = Task(
        description="Process dataset A asynchronously.",
        expected_output="Processed results of dataset A.",
        agent=data_processor,
        async_execution=True,
    )
    async_task2 = Task(
        description="Process dataset B asynchronously.",
        expected_output="Processed results of dataset B.",
        agent=data_processor,
        async_execution=True,
    )

    # Create the crew
    crew = Crew(
        agents=[data_processor],
        tasks=[async_task1, async_task2],
        process=Process.sequential,
    )

    # Execute the crew asynchronously
    result = await crew.kickoff_async()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "dataset a" in result.raw.lower() or "dataset b" in result.raw.lower()
    ), "Result does not contain expected content."


def test_crew_with_conditional_task(default_llm_config):
    """Test crew execution that includes a conditional task."""
    # Initialize agents
    analyst = Agent(
        role="Analyst",
        goal="Analyze data and make decisions based on insights.",
        backstory="A data analyst with experience in predictive modeling.",
        llm=default_llm_config,
    )
    decision_maker = Agent(
        role="Decision Maker",
        goal="Make decisions based on analysis.",
        backstory="An executive responsible for strategic decisions.",
        llm=default_llm_config,
    )

    # Define tasks
    analysis_task = Task(
        description="Analyze the quarterly financial data.",
        expected_output="A report highlighting key financial insights.",
        agent=analyst,
    )
    decision_task = ConditionalTask(
        description="If the profit margin is below 10%, recommend cost-cutting measures.",
        expected_output="Recommendations for reducing costs.",
        agent=decision_maker,
        condition=lambda output: "profit margin below 10%" in output.lower(),
    )

    # Create the crew
    crew = Crew(
        agents=[analyst, decision_maker],
        tasks=[analysis_task, decision_task],
        process=Process.sequential,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert len(result.tasks_output) >= 1, "No tasks were executed."


def test_crew_with_output_file():
    """Test crew execution that writes output to a file."""
    # Access the API key from environment variables
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    assert openai_api_key, "OPENAI_API_KEY environment variable is not set."

    # Create a temporary directory for output files
    with tempfile.TemporaryDirectory() as tmpdirname:
        # Initialize agent
        content_creator = Agent(
            role="Content Creator",
            goal="Generate engaging blog content.",
            backstory="A creative writer with a passion for storytelling.",
            llm={"provider": "openai", "model": "gpt-4", "api_key": openai_api_key},
        )

        # Define task with output file
        output_file_path = f"{tmpdirname}/blog_post.txt"
        blog_task = Task(
            description="Write a blog post about the benefits of remote work.",
            expected_output="An informative and engaging blog post.",
            agent=content_creator,
            output_file=output_file_path,
        )

        # Create the crew
        crew = Crew(
            agents=[content_creator],
            tasks=[blog_task],
            process=Process.sequential,
        )

        # Execute the crew
        crew.kickoff()

        # Assertions to verify the result
        assert os.path.exists(output_file_path), "Output file was not created."

        # Read the content from the file and perform assertions
        with open(output_file_path, "r") as file:
            content = file.read()
        assert (
            "remote work" in content.lower()
        ), "Output file does not contain expected content."


def test_invalid_hierarchical_process():
    """Test that an error is raised when using hierarchical process without a manager agent or manager_llm."""
    with pytest.raises(ValueError) as exc_info:
        Crew(
            agents=[],
            tasks=[],
            process=Process.hierarchical,  # Hierarchical process without a manager
        )
    assert "manager_llm or manager_agent is required" in str(exc_info.value)


def test_crew_with_memory(memory_agent, memory_tasks):
    """Test crew execution utilizing memory."""
    # Enable memory in the crew
    crew = Crew(
        agents=[memory_agent],
        tasks=memory_tasks,
        process=Process.sequential,
        memory=True,  # Enable memory
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "history of ai" in result.raw.lower() and "future of ai" in result.raw.lower()
    ), "Result does not contain expected content."

View File

@@ -0,0 +1,103 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Role. Test Backstory\nYour
personal goal is: Test Goal\nTo give my best complete final answer to the task
use the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"}, {"role": "user", "content": "\nCurrent Task: Return: Test output\n\nThis
is the expect criteria for your final answer: Test output\nyou MUST return the
actual complete content as the final answer, not a summary.\n\nBegin! This is
VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '776'
content-type:
- application/json
cookie:
- __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7fr4aPstiFUArxwxTVdfJSFwxsC\",\n \"object\":
\"chat.completion\",\n \"created\": 1727214471,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: Test output\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
155,\n \"completion_tokens\": 15,\n \"total_tokens\": 170,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85f9a91e311cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:47:51 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '216'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999817'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_88b1376917b345c976fdb03a55f7b6c1
http_version: HTTP/1.1
status_code: 200
version: 1