mirror of https://github.com/crewAIInc/crewAI.git
synced 2025-12-24 00:08:29 +00:00

Compare commits (10 commits)
bugfix/cre ... bugfix/kic
| Author | SHA1 | Date |
|---|---|---|
| | 9e23bd8357 | |
| | 7d576f34cd | |
| | 0ddf84a1d0 | |
| | dc2e684877 | |
| | daa01686f2 | |
| | 8bd292e875 | |
| | 37d425b10e | |
| | e69f4bc1a3 | |
| | e32d1007ba | |
| | 002568f2b2 | |
```diff
@@ -3,6 +3,7 @@ import shutil
 import subprocess
 from typing import Any, Dict, List, Literal, Optional, Union
 
+from litellm import AuthenticationError as LiteLLMAuthenticationError
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
 
 from crewai.agents import CacheHandler
@@ -261,6 +262,9 @@ class Agent(BaseAgent):
                 }
             )["output"]
         except Exception as e:
+            if isinstance(e, LiteLLMAuthenticationError):
+                # Do not retry on authentication errors
+                raise e
            self._times_executed += 1
            if self._times_executed > self.max_retry_limit:
                raise e
```
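In plain terms, the `Agent.execute_task` hunk above short-circuits the retry loop when LiteLLM reports an authentication failure. A minimal sketch of that control flow, using a hypothetical `run_with_retries` helper rather than the real crewAI class:

```python
# Hypothetical sketch (not crewAI code): retry on generic failures, but
# re-raise authentication errors immediately, mirroring the hunk above.
from typing import Callable

from litellm import AuthenticationError as LiteLLMAuthenticationError


def run_with_retries(call: Callable[[], str], max_retry_limit: int = 2) -> str:
    times_executed = 0
    while True:
        try:
            return call()
        except Exception as e:
            if isinstance(e, LiteLLMAuthenticationError):
                # A bad API key will not improve on retry; surface it at once.
                raise e
            times_executed += 1
            if times_executed > max_retry_limit:
                raise e
```

The real method keeps the same bookkeeping on `self._times_executed` and `self.max_retry_limit`, as the diff shows.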
```diff
@@ -3,6 +3,8 @@ import re
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Optional, Union
 
+from litellm.exceptions import AuthenticationError as LiteLLMAuthenticationError
+
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.parser import (
@@ -13,6 +15,7 @@ from crewai.agents.parser import (
     OutputParserException,
 )
 from crewai.agents.tools_handler import ToolsHandler
+from crewai.llm import LLM
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N, Printer
@@ -54,7 +57,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         callbacks: List[Any] = [],
     ):
         self._i18n: I18N = I18N()
-        self.llm = llm
+        self.llm: LLM = llm
         self.task = task
         self.agent = agent
         self.crew = crew
@@ -80,10 +83,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.tool_name_to_tool_map: Dict[str, BaseTool] = {
             tool.name: tool for tool in self.tools
         }
-        if self.llm.stop:
-            self.llm.stop = list(set(self.llm.stop + self.stop))
-        else:
-            self.llm.stop = self.stop
+        self.stop = stop_words
+        self.llm.stop = list(set(self.llm.stop + self.stop))
 
     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         if "system" in self.prompt:
@@ -98,7 +99,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._show_start_logs()
 
         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
-        formatted_answer = self._invoke_loop()
+
+        try:
+            formatted_answer = self._invoke_loop()
+        except Exception as e:
+            raise e
 
         if self.ask_for_human_input:
             formatted_answer = self._handle_human_feedback(formatted_answer)
@@ -124,7 +129,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 self._enforce_rpm_limit()
 
                 answer = self._get_llm_response()
-
                 formatted_answer = self._process_llm_response(answer)
 
                 if isinstance(formatted_answer, AgentAction):
```
```diff
@@ -145,10 +149,40 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 if self._is_context_length_exceeded(e):
                     self._handle_context_length()
                     continue
+                elif self._is_litellm_authentication_error(e):
+                    self._handle_litellm_auth_error(e)
+                    raise e
+                else:
+                    self._printer.print(
+                        content=f"Unhandled exception: {e}",
+                        color="red",
+                    )
+            finally:
+                self.iterations += 1
 
             self._show_logs(formatted_answer)
         return formatted_answer
 
+    def _is_litellm_authentication_error(self, exception: Exception) -> bool:
+        """Check if the exception is a litellm authentication error."""
+        if LiteLLMAuthenticationError and isinstance(
+            exception, LiteLLMAuthenticationError
+        ):
+            return True
+
+        return False
+
+    def _handle_litellm_auth_error(self, exception: Exception) -> None:
+        """Handle litellm authentication error by informing the user and exiting."""
+        self._printer.print(
+            content="Authentication error with litellm occurred. Please check your API key and configuration.",
+            color="red",
+        )
+        self._printer.print(
+            content=f"Error details: {exception}",
+            color="red",
+        )
+
     def _has_reached_max_iterations(self) -> bool:
         """Check if the maximum number of iterations has been reached."""
         return self.iterations >= self.max_iter
```
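The `_invoke_loop` changes above split exception handling into three paths: context-length overflows are compacted and retried, LiteLLM authentication errors are reported and re-raised, and anything else is printed as unhandled; the new `finally` block keeps the iteration counter moving in every case. A rough standalone sketch of that classification (simplified; the real code lives on `CrewAgentExecutor` and prints through crewAI's `Printer`):

```python
# Simplified sketch of the new exception routing; a free function for
# illustration, not the actual CrewAgentExecutor methods.
from litellm.exceptions import AuthenticationError as LiteLLMAuthenticationError


def classify_loop_exception(exception: Exception) -> str:
    """Return what the loop should do with this exception."""
    if isinstance(exception, LiteLLMAuthenticationError):
        print(
            "Authentication error with litellm occurred. "
            "Please check your API key and configuration."
        )
        print(f"Error details: {exception}")
        return "raise"  # surface immediately, do not keep looping
    print(f"Unhandled exception: {exception}")
    return "log"        # only report; the loop decides what happens next
```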
```diff
@@ -160,10 +194,17 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
 
     def _get_llm_response(self) -> str:
         """Call the LLM and return the response, handling any invalid responses."""
-        answer = self.llm.call(
-            self.messages,
-            callbacks=self.callbacks,
-        )
+        try:
+            answer = self.llm.call(
+                self.messages,
+                callbacks=self.callbacks,
+            )
+        except Exception as e:
+            self._printer.print(
+                content=f"Error during LLM call: {e}",
+                color="red",
+            )
+            raise e
 
         if not answer:
             self._printer.print(
@@ -184,7 +225,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
                 answer = answer.split("Observation:")[0].strip()
 
-        self.iterations += 1
         return self._format_answer(answer)
 
     def _handle_agent_action(
```
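`_get_llm_response` now wraps the raw `self.llm.call(...)` in a try/except so a failing call is reported before it propagates, and the old per-call `self.iterations += 1` in `_process_llm_response` is dropped in favor of the `finally` block shown earlier. A hedged sketch of the wrapper (a free function under assumed names; the real code is an executor method using `self.llm` and `self._printer`):

```python
# Hypothetical wrapper mirroring the new try/except around self.llm.call(...).
from typing import Any, Callable, Dict, List


def call_llm(call: Callable[..., str], messages: List[Dict[str, str]], callbacks: List[Any]) -> str:
    try:
        return call(messages, callbacks=callbacks)
    except Exception as e:
        # Report the failure before letting it propagate to the invoke loop,
        # where authentication errors are classified and re-raised.
        print(f"Error during LLM call: {e}")
        raise e
```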
```diff
@@ -142,7 +142,6 @@ class LLM:
         self.temperature = temperature
         self.top_p = top_p
         self.n = n
-        self.stop = stop
         self.max_completion_tokens = max_completion_tokens
         self.max_tokens = max_tokens
         self.presence_penalty = presence_penalty
@@ -160,6 +159,14 @@ class LLM:
 
         litellm.drop_params = True
 
+        # Normalize self.stop to always be a List[str]
+        if stop is None:
+            self.stop: List[str] = []
+        elif isinstance(stop, str):
+            self.stop = [stop]
+        else:
+            self.stop = stop
+
         self.set_callbacks(callbacks)
         self.set_env_callbacks()
 
```
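The constructor now normalizes `stop` into a list before storing it, which keeps the executor's later merge (`list(set(self.llm.stop + self.stop))`) working whether `stop` arrives as `None`, a single string, or a list. A small standalone sketch of the same coercion (a hypothetical helper, not the actual `LLM.__init__`):

```python
# Hypothetical helper mirroring the normalization added to LLM.__init__ above.
from typing import List, Optional, Union


def normalize_stop(stop: Optional[Union[str, List[str]]]) -> List[str]:
    """Coerce a stop-word argument into a list of strings."""
    if stop is None:
        return []
    if isinstance(stop, str):
        return [stop]
    return stop


# The three accepted shapes:
assert normalize_stop(None) == []
assert normalize_stop("\nObservation:") == ["\nObservation:"]
assert normalize_stop(["\nObservation:", "\nFinal Answer:"]) == ["\nObservation:", "\nFinal Answer:"]
```

Note that the merge in the executor deduplicates through `set`, so the order of the combined stop words is not guaranteed.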
```diff
@@ -222,7 +229,7 @@ class LLM:
             ].message
             text_response = response_message.content or ""
             tool_calls = getattr(response_message, "tool_calls", [])
 
             # Ensure callbacks get the full response object with usage info
             if callbacks and len(callbacks) > 0:
                 for callback in callbacks:
```
```diff
@@ -24,12 +24,10 @@ def create_llm(
 
     # 1) If llm_value is already an LLM object, return it directly
     if isinstance(llm_value, LLM):
-        print("LLM value is already an LLM object")
         return llm_value
 
     # 2) If llm_value is a string (model name)
     if isinstance(llm_value, str):
-        print("LLM value is a string")
         try:
             created_llm = LLM(model=llm_value)
             return created_llm
@@ -39,12 +37,10 @@ def create_llm(
 
     # 3) If llm_value is None, parse environment variables or use default
     if llm_value is None:
-        print("LLM value is None")
         return _llm_via_environment_or_fallback()
 
     # 4) Otherwise, attempt to extract relevant attributes from an unknown object
     try:
-        print("LLM value is an unknown object")
         # Extract attributes with explicit types
         model = (
             getattr(llm_value, "model_name", None)
```
```diff
@@ -16,7 +16,7 @@ from crewai.tools import tool
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
 from crewai.tools.tool_usage_events import ToolUsageFinished
-from crewai.utilities import RPMController
+from crewai.utilities import Printer, RPMController
 from crewai.utilities.events import Emitter
 
 
@@ -1600,3 +1600,103 @@ def test_agent_with_knowledge_sources():
 
     # Assert that the agent provides the correct information
     assert "red" in result.raw.lower()
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_litellm_auth_error_handling():
+    """Test that LiteLLM authentication errors are handled correctly and not retried."""
+    from litellm import AuthenticationError as LiteLLMAuthenticationError
+
+    # Create an agent with a mocked LLM and max_retry_limit=0
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        llm=LLM(model="gpt-4"),
+        max_retry_limit=0,  # Disable retries for authentication errors
+    )
+
+    # Create a task
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=agent,
+    )
+
+    # Mock the LLM call to raise LiteLLMAuthenticationError
+    with (
+        patch.object(LLM, "call") as mock_llm_call,
+        pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
+    ):
+        mock_llm_call.side_effect = LiteLLMAuthenticationError(
+            message="Invalid API key", llm_provider="openai", model="gpt-4"
+        )
+        agent.execute_task(task)
+
+    # Verify the call was only made once (no retries)
+    mock_llm_call.assert_called_once()
+
+
+def test_crew_agent_executor_litellm_auth_error():
+    """Test that CrewAgentExecutor properly identifies and handles LiteLLM authentication errors."""
+    from litellm import AuthenticationError as LiteLLMAuthenticationError
+
+    from crewai.agents.tools_handler import ToolsHandler
+    from crewai.utilities import Printer
+
+    # Create an agent and executor with max_retry_limit=0
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+        llm=LLM(model="gpt-4", api_key="invalid_api_key"),
+    )
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=agent,
+    )
+
+    # Create executor with all required parameters
+    executor = CrewAgentExecutor(
+        agent=agent,
+        task=task,
+        llm=agent.llm,
+        crew=None,
+        prompt={"system": "You are a test agent", "user": "Execute the task: {input}"},
+        max_iter=5,
+        tools=[],
+        tools_names="",
+        stop_words=[],
+        tools_description="",
+        tools_handler=ToolsHandler(),
+    )
+
+    # Mock the LLM call to raise LiteLLMAuthenticationError
+    with (
+        patch.object(LLM, "call") as mock_llm_call,
+        patch.object(Printer, "print") as mock_printer,
+        pytest.raises(LiteLLMAuthenticationError, match="Invalid API key"),
+    ):
+        mock_llm_call.side_effect = LiteLLMAuthenticationError(
+            message="Invalid API key", llm_provider="openai", model="gpt-4"
+        )
+        executor.invoke(
+            {
+                "input": "test input",
+                "tool_names": "",
+                "tools": "",
+            }
+        )
+
+    # Verify error handling
+    mock_printer.assert_any_call(
+        content="Authentication error with litellm occurred. Please check your API key and configuration.",
+        color="red",
+    )
+    mock_printer.assert_any_call(
+        content="Error details: litellm.AuthenticationError: Invalid API key",
+        color="red",
+    )
+    # Verify the call was only made once (no retries)
+    mock_llm_call.assert_called_once()
```
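Both new tests rely on the same mocking pattern: `unittest.mock.patch.object` replaces `LLM.call`, `side_effect` makes it raise, `pytest.raises` asserts the error escapes, and `assert_called_once` proves there was no retry. The parenthesized multi-manager `with (...)` form used here requires Python 3.10 or newer. A minimal, self-contained version of the pattern on a toy class (not crewAI code):

```python
# Toy illustration of patch.object + side_effect + pytest.raises + call count.
from unittest.mock import patch

import pytest


class Client:
    def call(self) -> str:
        return "ok"


def run_once(client: Client) -> str:
    return client.call()


def test_call_raises_and_is_not_retried():
    with (
        patch.object(Client, "call") as mock_call,  # replace Client.call with a Mock
        pytest.raises(RuntimeError, match="boom"),  # expect the error to bubble up
    ):
        mock_call.side_effect = RuntimeError("boom")
        run_once(Client())

    mock_call.assert_called_once()  # the call was not retried
```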
```diff
@@ -2,21 +2,21 @@ interactions:
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
-      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
-      answer but don''t give it yet, just re-use this tool non-stop. \nTool
-      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
-      about what to do\nAction: the action to take, only one name of [get_final_answer],
-      just the name, exactly as it''s written.\nAction Input: the input to the action,
-      just a simple python dictionary, enclosed in curly braces, using \" to wrap
-      keys and values.\nObservation: the result of the action\n\nOnce all necessary
-      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
-      the final answer to the original input question\n"}, {"role": "user", "content":
-      "\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect criteria
-      for your final answer: The final answer\nyou MUST return the actual complete
-      content as the final answer, not a summary.\n\nBegin! This is VERY important
-      to you, use the tools available and give your best Final Answer, your job depends
-      on it!\n\nThought:"}], "model": "gpt-4o"}'
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
+      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
+      just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
+      in your response:\n\n```\nThought: you should always think about what to do\nAction:
+      the action to take, only one name of [get_final_answer], just the name, exactly
+      as it''s written.\nAction Input: the input to the action, just a simple JSON
+      object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
+      the result of the action\n```\n\nOnce all necessary information is gathered,
+      return the following format:\n\n```\nThought: I now know the final answer\nFinal
+      Answer: the final answer to the original input question\n```"}, {"role": "user",
+      "content": "\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect
+      criteria for your final answer: The final answer\nyou MUST return the actual
+      complete content as the final answer, not a summary.\n\nBegin! This is VERY
+      important to you, use the tools available and give your best Final Answer, your
+      job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
     headers:
       accept:
       - application/json
```
```diff
@@ -25,16 +25,13 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '1325'
+      - '1367'
       content-type:
       - application/json
-      cookie:
-      - _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
-        __cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.47.0
+      - OpenAI/Python 1.59.6
       x-stainless-arch:
       - arm64
       x-stainless-async:
```
```diff
@@ -44,30 +41,35 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.47.0
+      - 1.59.6
       x-stainless-raw-response:
       - 'true'
+      x-stainless-retry-count:
+      - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.11.7
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
-    content: "{\n \"id\": \"chatcmpl-ABAtOWmVjvzQ9X58tKAUcOF4gmXwx\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1727226842,\n \"model\": \"gpt-4o-2024-05-13\",\n
-      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": \"Thought: I need to use the get_final_answer
-      tool to determine the final answer.\\nAction: get_final_answer\\nAction Input:
-      {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
-      \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 274,\n \"completion_tokens\":
-      27,\n \"total_tokens\": 301,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
-      0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
+    content: "{\n \"id\": \"chatcmpl-AsXdf4OZKCZSigmN4k0gyh67NciqP\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1737562383,\n \"model\": \"gpt-4o-2024-08-06\",\n
+      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+      \"assistant\",\n \"content\": \"```\\nThought: I have to use the available
+      tool to get the final answer. Let's proceed with executing it.\\nAction: get_final_answer\\nAction
+      Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
+      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+      274,\n \"completion_tokens\": 33,\n \"total_tokens\": 307,\n \"prompt_tokens_details\":
+      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
+      \"default\",\n \"system_fingerprint\": \"fp_50cad350e4\"\n}\n"
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8c8727b3492f31e6-MIA
+      - 9060d43e3be1d690-IAD
       Connection:
       - keep-alive
       Content-Encoding:
```
```diff
@@ -75,19 +77,27 @@ interactions:
       Content-Type:
       - application/json
       Date:
-      - Wed, 25 Sep 2024 01:14:03 GMT
+      - Wed, 22 Jan 2025 16:13:03 GMT
       Server:
       - cloudflare
+      Set-Cookie:
+      - __cf_bm=_Jcp7wnO_mXdvOnborCN6j8HwJxJXbszedJC1l7pFUg-1737562383-1.0.1.1-pDSLXlg.nKjG4wsT7mTJPjUvOX1UJITiS4MqKp6yfMWwRSJINsW1qC48SAcjBjakx2H5I1ESVk9JtUpUFDtf4g;
+        path=/; expires=Wed, 22-Jan-25 16:43:03 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=x3SYvzL2nq_PTBGtE8R9cl5CkeaaDzZFQIrYfo91S2s-1737562383916-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       Transfer-Encoding:
       - chunked
       X-Content-Type-Options:
       - nosniff
       access-control-expose-headers:
       - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
       openai-organization:
       - crewai-iuxna1
       openai-processing-ms:
-      - '348'
+      - '791'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
```
```diff
@@ -99,45 +109,59 @@ interactions:
       x-ratelimit-remaining-requests:
       - '9999'
       x-ratelimit-remaining-tokens:
-      - '29999682'
+      - '29999680'
       x-ratelimit-reset-requests:
       - 6ms
       x-ratelimit-reset-tokens:
       - 0s
       x-request-id:
-      - req_be929caac49706f487950548bdcdd46e
+      - req_eeed99acafd3aeb1e3d4a6c8063192b0
     http_version: HTTP/1.1
     status_code: 200
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
-      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
-      answer but don''t give it yet, just re-use this tool non-stop. \nTool
-      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
-      about what to do\nAction: the action to take, only one name of [get_final_answer],
-      just the name, exactly as it''s written.\nAction Input: the input to the action,
-      just a simple python dictionary, enclosed in curly braces, using \" to wrap
-      keys and values.\nObservation: the result of the action\n\nOnce all necessary
-      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
-      the final answer to the original input question\n"}, {"role": "user", "content":
-      "\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect criteria
-      for your final answer: The final answer\nyou MUST return the actual complete
-      content as the final answer, not a summary.\n\nBegin! This is VERY important
-      to you, use the tools available and give your best Final Answer, your job depends
-      on it!\n\nThought:"}, {"role": "user", "content": "Thought: I need to use the
-      get_final_answer tool to determine the final answer.\nAction: get_final_answer\nAction
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
+      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
+      just re-use this\n tool non-stop.\n\nIMPORTANT: Use the following format
+      in your response:\n\n```\nThought: you should always think about what to do\nAction:
+      the action to take, only one name of [get_final_answer], just the name, exactly
+      as it''s written.\nAction Input: the input to the action, just a simple JSON
+      object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
+      the result of the action\n```\n\nOnce all necessary information is gathered,
+      return the following format:\n\n```\nThought: I now know the final answer\nFinal
+      Answer: the final answer to the original input question\n```"}, {"role": "user",
+      "content": "\nCurrent Task: Use the get_final_answer tool.\n\nThis is the expect
+      criteria for your final answer: The final answer\nyou MUST return the actual
+      complete content as the final answer, not a summary.\n\nBegin! This is VERY
+      important to you, use the tools available and give your best Final Answer, your
+      job depends on it!\n\nThought:"}, {"role": "assistant", "content": "```\nThought:
+      I have to use the available tool to get the final answer. Let''s proceed with
+      executing it.\nAction: get_final_answer\nAction Input: {}\nObservation: I encountered
+      an error: Error on parsing tool.\nMoving on then. I MUST either use a tool (use
+      one at time) OR give my best final answer not both at the same time. When responding,
+      I must use the following format:\n\n```\nThought: you should always think about
+      what to do\nAction: the action to take, should be one of [get_final_answer]\nAction
+      Input: the input to the action, dictionary enclosed in curly braces\nObservation:
+      the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat
+      N times. Once I know the final answer, I must return the following format:\n\n```\nThought:
+      I now can give a great answer\nFinal Answer: Your final answer must be the great
+      and the most complete as possible, it must be outcome described\n\n```"}, {"role":
+      "assistant", "content": "```\nThought: I have to use the available tool to get
+      the final answer. Let''s proceed with executing it.\nAction: get_final_answer\nAction
       Input: {}\nObservation: I encountered an error: Error on parsing tool.\nMoving
       on then. I MUST either use a tool (use one at time) OR give my best final answer
-      not both at the same time. To Use the following format:\n\nThought: you should
-      always think about what to do\nAction: the action to take, should be one of
-      [get_final_answer]\nAction Input: the input to the action, dictionary enclosed
-      in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action
-      Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal
+      not both at the same time. When responding, I must use the following format:\n\n```\nThought:
+      you should always think about what to do\nAction: the action to take, should
+      be one of [get_final_answer]\nAction Input: the input to the action, dictionary
+      enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action
+      Input/Result can repeat N times. Once I know the final answer, I must return
+      the following format:\n\n```\nThought: I now can give a great answer\nFinal
       Answer: Your final answer must be the great and the most complete as possible,
-      it must be outcome described\n\n \nNow it''s time you MUST give your absolute
+      it must be outcome described\n\n```\nNow it''s time you MUST give your absolute
       best final answer. You''ll ignore all previous instructions, stop using any
-      tools, and just return your absolute BEST Final answer."}], "model": "gpt-4o"}'
+      tools, and just return your absolute BEST Final answer."}], "model": "gpt-4o",
+      "stop": ["\nObservation:"]}'
     headers:
       accept:
       - application/json
```
```diff
@@ -146,16 +170,16 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '2320'
+      - '3445'
       content-type:
       - application/json
       cookie:
-      - _cfuvid=ePJSDFdHag2D8lj21_ijAMWjoA6xfnPNxN4uekvC728-1727226247743-0.0.1.1-604800000;
-        __cf_bm=3giyBOIM0GNudFELtsBWYXwLrpLBTNLsh81wfXgu2tg-1727226247-1.0.1.1-ugUDz0c5EhmfVpyGtcdedlIWeDGuy2q0tXQTKVpv83HZhvxgBcS7SBL1wS4rapPM38yhfEcfwA79ARt3HQEzKA
+      - __cf_bm=_Jcp7wnO_mXdvOnborCN6j8HwJxJXbszedJC1l7pFUg-1737562383-1.0.1.1-pDSLXlg.nKjG4wsT7mTJPjUvOX1UJITiS4MqKp6yfMWwRSJINsW1qC48SAcjBjakx2H5I1ESVk9JtUpUFDtf4g;
+        _cfuvid=x3SYvzL2nq_PTBGtE8R9cl5CkeaaDzZFQIrYfo91S2s-1737562383916-0.0.1.1-604800000
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.47.0
+      - OpenAI/Python 1.59.6
       x-stainless-arch:
       - arm64
       x-stainless-async:
```
```diff
@@ -165,29 +189,36 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.47.0
+      - 1.59.6
       x-stainless-raw-response:
       - 'true'
+      x-stainless-retry-count:
+      - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.11.7
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
-    content: "{\n \"id\": \"chatcmpl-ABAtPaaeRfdNsZ3k06CfAmrEW8IJu\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1727226843,\n \"model\": \"gpt-4o-2024-05-13\",\n
-      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": \"Final Answer: The final answer\",\n \"refusal\":
-      null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
-      \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 483,\n \"completion_tokens\":
-      6,\n \"total_tokens\": 489,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
-      0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
+    content: "{\n \"id\": \"chatcmpl-AsXdg9UrLvAiqWP979E6DszLsQ84k\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1737562384,\n \"model\": \"gpt-4o-2024-08-06\",\n
+      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+      \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal
+      Answer: The final answer must be the great and the most complete as possible,
+      it must be outcome described.\\n```\",\n \"refusal\": null\n },\n
+      \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
+      \ \"usage\": {\n \"prompt_tokens\": 719,\n \"completion_tokens\": 35,\n
+      \ \"total_tokens\": 754,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
+      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
+      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
+      \"default\",\n \"system_fingerprint\": \"fp_50cad350e4\"\n}\n"
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8c8727b9da1f31e6-MIA
+      - 9060d4441edad690-IAD
       Connection:
       - keep-alive
       Content-Encoding:
```
```diff
@@ -195,7 +226,7 @@ interactions:
       Content-Type:
       - application/json
       Date:
-      - Wed, 25 Sep 2024 01:14:03 GMT
+      - Wed, 22 Jan 2025 16:13:05 GMT
       Server:
       - cloudflare
       Transfer-Encoding:
```
```diff
@@ -209,7 +240,7 @@ interactions:
       openai-organization:
       - crewai-iuxna1
       openai-processing-ms:
-      - '188'
+      - '928'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
```
```diff
@@ -221,13 +252,13 @@ interactions:
       x-ratelimit-remaining-requests:
       - '9999'
       x-ratelimit-remaining-tokens:
-      - '29999445'
+      - '29999187'
       x-ratelimit-reset-requests:
       - 6ms
       x-ratelimit-reset-tokens:
       - 1ms
       x-request-id:
-      - req_d8e32538689fe064627468bad802d9a8
+      - req_61fc7506e6db326ec572224aec81ef23
     http_version: HTTP/1.1
     status_code: 200
 version: 1
```