Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-24 15:48:23 +00:00

Compare commits: 4 commits (c9cf47e6ff ... bugfix/add)
| Author | SHA1 | Date |
|---|---|---|
|  | 54acbc9d0e |  |
|  | 2131b94ddb |  |
|  | b3504e768c |  |
|  | 350457b9b8 |  |
.github/workflows/tests.yml (vendored): 44 changed lines
```diff
@@ -1,32 +1,60 @@
 name: Run Tests

-on: [pull_request]
+on:
+  pull_request:
+  push:
+    branches:
+      - main

 permissions:
   contents: write

-env:
-  OPENAI_API_KEY: fake-api-key
-
 jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
+    env:
+      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+      MODEL: gpt-4o-mini
     steps:
       - name: Checkout code
         uses: actions/checkout@v4

-      - name: Install uv
+      - name: Install UV
         uses: astral-sh/setup-uv@v3
         with:
           enable-cache: true

       - name: Set up Python
         run: uv python install 3.12.8

       - name: Install the project
         run: uv sync --dev --all-extras

-      - name: Run tests
-        run: uv run pytest tests -vv
+      - name: Run General Tests
+        run: uv run pytest tests -k "not main_branch_tests" -vv
+
+  main_branch_tests:
+    if: github.ref == 'refs/heads/main'
+    runs-on: ubuntu-latest
+    needs: tests
+    timeout-minutes: 15
+    env:
+      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Install UV
+        uses: astral-sh/setup-uv@v3
+        with:
+          enable-cache: true
+
+      - name: Set up Python
+        run: uv python install 3.12.8
+
+      - name: Install the project
+        run: uv sync --dev --all-extras
+
+      - name: Run Main Branch Specific Tests
+        run: uv run pytest tests/main_branch_tests -vv
```
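As an aside on the `-k` flag above: pytest's `-k` expression does a case-insensitive match against each test's identifiers, so any test collected under a `main_branch_tests` path or name is deselected in the `tests` job and left to the `main_branch_tests` job. A minimal model of that selection, with hypothetical node IDs:

```python
# Hypothetical node IDs; only the substring check is modeled here.
node_ids = [
    "tests/test_agent.py::test_agent_basics",
    "tests/main_branch_tests/test_live.py::test_live_call",
]

# pytest tests -k "not main_branch_tests" keeps IDs without the substring:
selected = [nid for nid in node_ids if "main_branch_tests" not in nid]
assert selected == ["tests/test_agent.py::test_agent_basics"]
```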
```diff
@@ -101,6 +101,8 @@ from crewai_tools import SerperDevTool
 class LatestAiDevelopmentCrew():
     """LatestAiDevelopment crew"""

+    agents_config = "config/agents.yaml"
+
     @agent
     def researcher(self) -> Agent:
         return Agent(
```
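The added `agents_config` path follows crewAI's `@CrewBase` convention: the decorator resolves the path relative to the crew module and loads the YAML into `self.agents_config`, so each `@agent` method can pull its section by key. A sketch of that wiring (the `researcher` YAML key is an assumption, not shown in this diff):

```python
from crewai import Agent
from crewai.project import CrewBase, agent


@CrewBase
class LatestAiDevelopmentCrew:
    """LatestAiDevelopment crew"""

    agents_config = "config/agents.yaml"  # resolved relative to this module

    @agent
    def researcher(self) -> Agent:
        # "researcher" is assumed to be a top-level key in config/agents.yaml
        return Agent(config=self.agents_config["researcher"], verbose=True)
```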
```diff
@@ -19,15 +19,10 @@ class CrewAgentExecutorMixin:
     agent: Optional["BaseAgent"]
     task: Optional["Task"]
     iterations: int
-    have_forced_answer: bool
     max_iter: int
     _i18n: I18N
     _printer: Printer = Printer()

-    def _should_force_answer(self) -> bool:
-        """Determine if a forced answer is required based on iteration count."""
-        return self.iterations >= self.max_iter
-
     def _create_short_term_memory(self, output) -> None:
         """Create and save a short-term memory item if conditions are met."""
         if (
```
```diff
@@ -1,7 +1,7 @@
 import json
 import re
 from dataclasses import dataclass
-from typing import Any, Dict, List, Union
+from typing import Any, Callable, Dict, List, Optional, Union

 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
```
```diff
@@ -50,7 +50,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         original_tools: List[Any] = [],
         function_calling_llm: Any = None,
         respect_context_window: bool = False,
-        request_within_rpm_limit: Any = None,
+        request_within_rpm_limit: Optional[Callable[[], bool]] = None,
         callbacks: List[Any] = [],
     ):
         self._i18n: I18N = I18N()
```
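The tightened `request_within_rpm_limit` annotation is worth a note: with `Any`, a non-callable could be passed and only fail at call time, while `Optional[Callable[[], bool]]` lets a type checker catch it up front. A small illustration (the function name is hypothetical):

```python
from typing import Callable, Optional


def run(request_within_rpm_limit: Optional[Callable[[], bool]] = None) -> None:
    # Under Any, run(request_within_rpm_limit=True) type-checked but crashed
    # at the call site; under Optional[Callable[[], bool]] a checker flags it.
    if request_within_rpm_limit:
        request_within_rpm_limit()


run()              # limiter absent: skipped
run(lambda: True)  # callable limiter: invoked
```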
```diff
@@ -77,7 +77,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
-        self.have_forced_answer = False
         self.tool_name_to_tool_map: Dict[str, BaseTool] = {
             tool.name: tool for tool in self.tools
         }
```
```diff
@@ -108,106 +107,151 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}

-    def _invoke_loop(self, formatted_answer=None):
-        try:
-            while not isinstance(formatted_answer, AgentFinish):
-                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
-                    answer = self.llm.call(
-                        self.messages,
-                        callbacks=self.callbacks,
-                    )
-
-                    if answer is None or answer == "":
-                        self._printer.print(
-                            content="Received None or empty response from LLM call.",
-                            color="red",
-                        )
-                        raise ValueError(
-                            "Invalid response from LLM call - None or empty."
-                        )
-
-                    if not self.use_stop_words:
-                        try:
-                            self._format_answer(answer)
-                        except OutputParserException as e:
-                            if (
-                                FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
-                                in e.error
-                            ):
-                                answer = answer.split("Observation:")[0].strip()
-
-                    self.iterations += 1
-                    formatted_answer = self._format_answer(answer)
-
-                    if isinstance(formatted_answer, AgentAction):
-                        tool_result = self._execute_tool_and_check_finality(
-                            formatted_answer
-                        )
-
-                        # Directly append the result to the messages if the
-                        # tool is "Add image to content" in case of multimodal
-                        # agents
-                        if formatted_answer.tool == self._i18n.tools("add_image")["name"]:
-                            self.messages.append(tool_result.result)
-                            continue
-
-                        else:
-                            if self.step_callback:
-                                self.step_callback(tool_result)
-
-                            formatted_answer.text += f"\nObservation: {tool_result.result}"
-                            formatted_answer.result = tool_result.result
-
-                            if tool_result.result_as_answer:
-                                return AgentFinish(
-                                    thought="",
-                                    output=tool_result.result,
-                                    text=formatted_answer.text,
-                                )
-                        self._show_logs(formatted_answer)
-
-                    if self._should_force_answer():
-                        if self.have_forced_answer:
-                            return AgentFinish(
-                                thought="",
-                                output=self._i18n.errors(
-                                    "force_final_answer_error"
-                                ).format(formatted_answer.text),
-                                text=formatted_answer.text,
-                            )
-                        else:
-                            formatted_answer.text += (
-                                f'\n{self._i18n.errors("force_final_answer")}'
-                            )
-                            self.have_forced_answer = True
-                        self.messages.append(
-                            self._format_msg(formatted_answer.text, role="assistant")
-                        )
-
-        except OutputParserException as e:
-            self.messages.append({"role": "user", "content": e.error})
-            if self.iterations > self.log_error_after:
-                self._printer.print(
-                    content=f"Error parsing LLM output, agent will retry: {e.error}",
-                    color="red",
-                )
-            return self._invoke_loop(formatted_answer)
-
-        except Exception as e:
-            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
-                str(e)
-            ):
-                self._handle_context_length()
-                return self._invoke_loop(formatted_answer)
-            else:
-                raise e
-
-        self._show_logs(formatted_answer)
-        return formatted_answer
+    def _invoke_loop(self):
+        """
+        Main loop to invoke the agent's thought process until it reaches a conclusion
+        or the maximum number of iterations is reached.
+        """
+        formatted_answer = None
+        while not isinstance(formatted_answer, AgentFinish):
+            try:
+                if self._has_reached_max_iterations():
+                    formatted_answer = self._handle_max_iterations_exceeded(
+                        formatted_answer
+                    )
+                    break
+
+                self._enforce_rpm_limit()
+
+                answer = self._get_llm_response()
+                formatted_answer = self._process_llm_response(answer)
+
+                if isinstance(formatted_answer, AgentAction):
+                    tool_result = self._execute_tool_and_check_finality(
+                        formatted_answer
+                    )
+                    formatted_answer = self._handle_agent_action(
+                        formatted_answer, tool_result
+                    )
+
+                self._invoke_step_callback(formatted_answer)
+                self._append_message(formatted_answer.text, role="assistant")
+
+            except OutputParserException as e:
+                formatted_answer = self._handle_output_parser_exception(e)
+
+            except Exception as e:
+                if self._is_context_length_exceeded(e):
+                    self._handle_context_length()
+                    continue
+                else:
+                    raise e
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
+
+    def _has_reached_max_iterations(self) -> bool:
+        """Check if the maximum number of iterations has been reached."""
+        return self.iterations >= self.max_iter
+
+    def _enforce_rpm_limit(self) -> None:
+        """Enforce the requests per minute (RPM) limit if applicable."""
+        if self.request_within_rpm_limit:
+            self.request_within_rpm_limit()
+
+    def _get_llm_response(self) -> str:
+        """Call the LLM and return the response, handling any invalid responses."""
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if not answer:
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        return answer
+
+    def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
+        """Process the LLM response and format it into an AgentAction or AgentFinish."""
+        if not self.use_stop_words:
+            try:
+                # Preliminary parsing to check for errors.
+                self._format_answer(answer)
+            except OutputParserException as e:
+                if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
+                    answer = answer.split("Observation:")[0].strip()
+
+        self.iterations += 1
+        return self._format_answer(answer)
+
+    def _handle_agent_action(
+        self, formatted_answer: AgentAction, tool_result: ToolResult
+    ) -> Union[AgentAction, AgentFinish]:
+        """Handle the AgentAction, execute tools, and process the results."""
+        add_image_tool = self._i18n.tools("add_image")
+        if (
+            isinstance(add_image_tool, dict)
+            and formatted_answer.tool.casefold().strip()
+            == add_image_tool.get("name", "").casefold().strip()
+        ):
+            self.messages.append(tool_result.result)
+            return formatted_answer  # Continue the loop
+
+        if self.step_callback:
+            self.step_callback(tool_result)
+
+        formatted_answer.text += f"\nObservation: {tool_result.result}"
+        formatted_answer.result = tool_result.result
+
+        if tool_result.result_as_answer:
+            return AgentFinish(
+                thought="",
+                output=tool_result.result,
+                text=formatted_answer.text,
+            )
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
+
+    def _invoke_step_callback(self, formatted_answer) -> None:
+        """Invoke the step callback if it exists."""
+        if self.step_callback:
+            self.step_callback(formatted_answer)
+
+    def _append_message(self, text: str, role: str = "assistant") -> None:
+        """Append a message to the message list with the given role."""
+        self.messages.append(self._format_msg(text, role=role))
+
+    def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
+        """Handle OutputParserException by updating messages and formatted_answer."""
+        self.messages.append({"role": "user", "content": e.error})
+
+        formatted_answer = AgentAction(
+            text=e.error,
+            tool="",
+            tool_input="",
+            thought="",
+        )
+
+        if self.iterations > self.log_error_after:
+            self._printer.print(
+                content=f"Error parsing LLM output, agent will retry: {e.error}",
+                color="red",
+            )
+
+        return formatted_answer
+
+    def _is_context_length_exceeded(self, exception: Exception) -> bool:
+        """Check if the exception is due to context length exceeding."""
+        return LLMContextLengthExceededException(
+            str(exception)
+        )._is_context_limit_error(str(exception))

     def _show_start_logs(self):
         if self.agent is None:
             raise ValueError("Agent cannot be None")
```
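One consequence of this rewrite that the diff shows but does not spell out: the old code retried by recursing (`return self._invoke_loop(formatted_answer)` inside both `except` blocks), so a long streak of parser or context-length errors grew the call stack, while the new code retries by looping. A condensed sketch of the difference (hypothetical functions):

```python
import sys


def retry_by_recursion(retries: int) -> str:
    if retries == 0:
        return "done"
    return retry_by_recursion(retries - 1)  # one stack frame per retry


def retry_by_loop(retries: int) -> str:
    while retries > 0:  # constant stack depth, like the new _invoke_loop
        retries -= 1
    return "done"


print(sys.getrecursionlimit())  # typically 1000; retry_by_recursion(5000) would raise RecursionError
print(retry_by_loop(5000))      # fine at any retry count
```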
```diff
@@ -487,3 +531,45 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self.ask_for_human_input = False

         return formatted_answer
+
+    def _handle_max_iterations_exceeded(self, formatted_answer):
+        """
+        Handles the case when the maximum number of iterations is exceeded.
+        Performs one more LLM call to get the final answer.
+
+        Parameters:
+            formatted_answer: The last formatted answer from the agent.
+
+        Returns:
+            The final formatted answer after exceeding max iterations.
+        """
+        self._printer.print(
+            content="Maximum iterations reached. Requesting final answer.",
+            color="yellow",
+        )
+
+        if formatted_answer and hasattr(formatted_answer, "text"):
+            assistant_message = (
+                formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
+            )
+        else:
+            assistant_message = self._i18n.errors("force_final_answer")
+
+        self.messages.append(self._format_msg(assistant_message, role="assistant"))
+
+        # Perform one more LLM call to get the final answer
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if answer is None or answer == "":
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        formatted_answer = self._format_answer(answer)
+        # Return the formatted answer, regardless of its type
+        return formatted_answer
```
```diff
@@ -17,6 +17,12 @@ ENV_VARS = {
             "key_name": "GEMINI_API_KEY",
         }
     ],
+    "nvidia_nim": [
+        {
+            "prompt": "Enter your NVIDIA API key (press Enter to skip)",
+            "key_name": "NVIDIA_NIM_API_KEY",
+        }
+    ],
     "groq": [
         {
             "prompt": "Enter your GROQ API key (press Enter to skip)",
```
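For context, entries in `ENV_VARS` pair a CLI prompt with the environment variable the answer should populate. Roughly how such a table is consumed during provider setup (a condensed sketch, not the actual CLI code):

```python
ENV_VARS = {
    "nvidia_nim": [
        {
            "prompt": "Enter your NVIDIA API key (press Enter to skip)",
            "key_name": "NVIDIA_NIM_API_KEY",
        }
    ],
}


def collect_keys(provider: str) -> dict[str, str]:
    """Prompt for each configured key; pressing Enter skips it."""
    env: dict[str, str] = {}
    for spec in ENV_VARS.get(provider, []):
        value = input(spec["prompt"] + ": ").strip()
        if value:
            env[spec["key_name"]] = value
    return env
```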
```diff
@@ -98,6 +104,7 @@ PROVIDERS = [
     "openai",
     "anthropic",
     "gemini",
+    "nvidia_nim",
     "groq",
     "ollama",
     "watson",
```
```diff
@@ -121,6 +128,75 @@ MODELS = {
         "gemini/gemini-gemma-2-9b-it",
         "gemini/gemini-gemma-2-27b-it",
     ],
+    "nvidia_nim": [
+        "nvidia_nim/nvidia/mistral-nemo-minitron-8b-8k-instruct",
+        "nvidia_nim/nvidia/nemotron-4-mini-hindi-4b-instruct",
+        "nvidia_nim/nvidia/llama-3.1-nemotron-70b-instruct",
+        "nvidia_nim/nvidia/llama3-chatqa-1.5-8b",
+        "nvidia_nim/nvidia/llama3-chatqa-1.5-70b",
+        "nvidia_nim/nvidia/vila",
+        "nvidia_nim/nvidia/neva-22",
+        "nvidia_nim/nvidia/nemotron-mini-4b-instruct",
+        "nvidia_nim/nvidia/usdcode-llama3-70b-instruct",
+        "nvidia_nim/nvidia/nemotron-4-340b-instruct",
+        "nvidia_nim/meta/codellama-70b",
+        "nvidia_nim/meta/llama2-70b",
+        "nvidia_nim/meta/llama3-8b-instruct",
+        "nvidia_nim/meta/llama3-70b-instruct",
+        "nvidia_nim/meta/llama-3.1-8b-instruct",
+        "nvidia_nim/meta/llama-3.1-70b-instruct",
+        "nvidia_nim/meta/llama-3.1-405b-instruct",
+        "nvidia_nim/meta/llama-3.2-1b-instruct",
+        "nvidia_nim/meta/llama-3.2-3b-instruct",
+        "nvidia_nim/meta/llama-3.2-11b-vision-instruct",
+        "nvidia_nim/meta/llama-3.2-90b-vision-instruct",
+        "nvidia_nim/meta/llama-3.1-70b-instruct",
+        "nvidia_nim/google/gemma-7b",
+        "nvidia_nim/google/gemma-2b",
+        "nvidia_nim/google/codegemma-7b",
+        "nvidia_nim/google/codegemma-1.1-7b",
+        "nvidia_nim/google/recurrentgemma-2b",
+        "nvidia_nim/google/gemma-2-9b-it",
+        "nvidia_nim/google/gemma-2-27b-it",
+        "nvidia_nim/google/gemma-2-2b-it",
+        "nvidia_nim/google/deplot",
+        "nvidia_nim/google/paligemma",
+        "nvidia_nim/mistralai/mistral-7b-instruct-v0.2",
+        "nvidia_nim/mistralai/mixtral-8x7b-instruct-v0.1",
+        "nvidia_nim/mistralai/mistral-large",
+        "nvidia_nim/mistralai/mixtral-8x22b-instruct-v0.1",
+        "nvidia_nim/mistralai/mistral-7b-instruct-v0.3",
+        "nvidia_nim/nv-mistralai/mistral-nemo-12b-instruct",
+        "nvidia_nim/mistralai/mamba-codestral-7b-v0.1",
+        "nvidia_nim/microsoft/phi-3-mini-128k-instruct",
+        "nvidia_nim/microsoft/phi-3-mini-4k-instruct",
+        "nvidia_nim/microsoft/phi-3-small-8k-instruct",
+        "nvidia_nim/microsoft/phi-3-small-128k-instruct",
+        "nvidia_nim/microsoft/phi-3-medium-4k-instruct",
+        "nvidia_nim/microsoft/phi-3-medium-128k-instruct",
+        "nvidia_nim/microsoft/phi-3.5-mini-instruct",
+        "nvidia_nim/microsoft/phi-3.5-moe-instruct",
+        "nvidia_nim/microsoft/kosmos-2",
+        "nvidia_nim/microsoft/phi-3-vision-128k-instruct",
+        "nvidia_nim/microsoft/phi-3.5-vision-instruct",
+        "nvidia_nim/databricks/dbrx-instruct",
+        "nvidia_nim/snowflake/arctic",
+        "nvidia_nim/aisingapore/sea-lion-7b-instruct",
+        "nvidia_nim/ibm/granite-8b-code-instruct",
+        "nvidia_nim/ibm/granite-34b-code-instruct",
+        "nvidia_nim/ibm/granite-3.0-8b-instruct",
+        "nvidia_nim/ibm/granite-3.0-3b-a800m-instruct",
+        "nvidia_nim/mediatek/breeze-7b-instruct",
+        "nvidia_nim/upstage/solar-10.7b-instruct",
+        "nvidia_nim/writer/palmyra-med-70b-32k",
+        "nvidia_nim/writer/palmyra-med-70b",
+        "nvidia_nim/writer/palmyra-fin-70b-32k",
+        "nvidia_nim/01-ai/yi-large",
+        "nvidia_nim/deepseek-ai/deepseek-coder-6.7b-instruct",
+        "nvidia_nim/rakuten/rakutenai-7b-instruct",
+        "nvidia_nim/rakuten/rakutenai-7b-chat",
+        "nvidia_nim/baichuan-inc/baichuan2-13b-chat",
+    ],
     "groq": [
         "groq/llama-3.1-8b-instant",
         "groq/llama-3.1-70b-versatile",
```
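Once registered, a model string from this list is used like any other provider-prefixed model in crewAI. A sketch (the key value is a placeholder; the `LLM` wrapper itself is existing crewAI API, not part of this diff):

```python
import os

from crewai import LLM

os.environ["NVIDIA_NIM_API_KEY"] = "nvapi-..."  # key name matches ENV_VARS above

nim_llm = LLM(model="nvidia_nim/meta/llama-3.1-8b-instruct")  # entry from MODELS above
```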
```diff
@@ -76,7 +76,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
-    #sambanova
+    # sambanova
     "Meta-Llama-3.3-70B-Instruct": 131072,
     "QwQ-32B-Preview": 8192,
     "Qwen2.5-72B-Instruct": 8192,
```
```diff
@@ -27,7 +27,7 @@
     "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals."
   },
   "errors": {
-    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
+    "force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
     "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
     "agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
     "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
```
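The template change also matters mechanically: the old placeholder `{formatted_answer.text}` requires a keyword named `formatted_answer`, so filling it positionally raises `KeyError`, whereas the new `{formatted_answer}` fills cleanly. A quick illustration, assuming the template is filled with `str.format`:

```python
old = "You can't keep going, this was the best you could do.\n {formatted_answer.text}"
new = "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}"

try:
    old.format("some answer")  # positional argument, named placeholder
except KeyError as exc:
    print("KeyError:", exc)    # -> KeyError: 'formatted_answer'

print(new.format(formatted_answer="some answer"))  # fills cleanly
```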
```diff
@@ -67,7 +67,6 @@ def create_llm(
             api_key=api_key,
             base_url=base_url,
         )
-        print("LLM created with extracted parameters; " f"model='{model}'")
         return created_llm
     except Exception as e:
         print(f"Error instantiating LLM from unknown object type: {e}")
```
```diff
@@ -8,8 +8,10 @@ from crewai.utilities.logger import Logger

 """Controls request rate limiting for API calls."""

+
 class RPMController(BaseModel):
     """Manages requests per minute limiting."""
+
     max_rpm: Optional[int] = Field(default=None)
     logger: Logger = Field(default_factory=lambda: Logger(verbose=False))
     _current_rpm: int = PrivateAttr(default=0)
```
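For readers unfamiliar with the class being touched here, the idea behind `RPMController` is a fixed one-minute window with a counter. A minimal stand-in (only `max_rpm` and `_current_rpm` mirror real field names; everything else is simplified):

```python
import threading
import time


class TinyRPMLimiter:
    def __init__(self, max_rpm: int) -> None:
        self.max_rpm = max_rpm
        self._current_rpm = 0
        self._window_start = time.monotonic()
        self._lock = threading.Lock()

    def check_or_wait(self) -> bool:
        """Consume one request slot, sleeping out the minute when exhausted."""
        with self._lock:
            elapsed = time.monotonic() - self._window_start
            if elapsed >= 60:
                self._window_start = time.monotonic()
                self._current_rpm = 0
            elif self._current_rpm >= self.max_rpm:
                time.sleep(60 - elapsed)  # analogous to _wait_for_next_minute
                self._window_start = time.monotonic()
                self._current_rpm = 0
            self._current_rpm += 1
            return True


limiter = TinyRPMLimiter(max_rpm=2)
assert limiter.check_or_wait() and limiter.check_or_wait()  # a third call would sleep
```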
```diff
@@ -565,7 +565,7 @@ def test_agent_moved_on_after_max_iterations():
         task=task,
         tools=[get_final_answer],
     )
-    assert output == "The final answer is 42."
+    assert output == "42"


 @pytest.mark.vcr(filter_headers=["authorization"])
```
```diff
@@ -574,7 +574,6 @@ def test_agent_respect_the_max_rpm_set(capsys):
     def get_final_answer() -> float:
         """Get the final answer but don't give it yet, just re-use this
         tool non-stop."""
-        return 42

     agent = Agent(
         role="test role",
```
@@ -641,15 +640,14 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
|
|||||||
|
|
||||||
|
|
||||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
def test_agent_without_max_rpm_respet_crew_rpm(capsys):
|
def test_agent_without_max_rpm_respects_crew_rpm(capsys):
|
||||||
from unittest.mock import patch
|
from unittest.mock import patch
|
||||||
|
|
||||||
from crewai.tools import tool
|
from crewai.tools import tool
|
||||||
|
|
||||||
@tool
|
@tool
|
||||||
def get_final_answer() -> float:
|
def get_final_answer() -> float:
|
||||||
"""Get the final answer but don't give it yet, just re-use this
|
"""Get the final answer but don't give it yet, just re-use this tool non-stop."""
|
||||||
tool non-stop."""
|
|
||||||
return 42
|
return 42
|
||||||
|
|
||||||
agent1 = Agent(
|
agent1 = Agent(
|
||||||
```diff
@@ -666,23 +664,30 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
         role="test role2",
         goal="test goal2",
         backstory="test backstory2",
-        max_iter=1,
+        max_iter=5,
         verbose=True,
         allow_delegation=False,
     )

     tasks = [
         Task(
-            description="Just say hi.", agent=agent1, expected_output="Your greeting."
+            description="Just say hi.",
+            agent=agent1,
+            expected_output="Your greeting.",
         ),
         Task(
-            description="NEVER give a Final Answer, unless you are told otherwise, instead keep using the `get_final_answer` tool non-stop, until you must give you best final answer",
+            description=(
+                "NEVER give a Final Answer, unless you are told otherwise, "
+                "instead keep using the `get_final_answer` tool non-stop, "
+                "until you must give your best final answer"
+            ),
             expected_output="The final answer",
             tools=[get_final_answer],
             agent=agent2,
         ),
     ]

+    # Set crew's max_rpm to 1 to trigger RPM limit
     crew = Crew(agents=[agent1, agent2], tasks=tasks, max_rpm=1, verbose=True)

     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
```
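The `patch.object(RPMController, "_wait_for_next_minute")` line above is what keeps this test fast: the blocking wait is swapped for a mock that records calls instead of sleeping. The technique in isolation (the class below is a stand-in, not the real `RPMController`):

```python
from unittest.mock import patch


class FakeLimiter:
    def _wait_for_next_minute(self) -> None:  # normally time.sleep(...)
        raise RuntimeError("would block for up to a minute")


with patch.object(FakeLimiter, "_wait_for_next_minute") as moveon:
    FakeLimiter()._wait_for_next_minute()  # intercepted: no RuntimeError raised
moveon.assert_called_once()
```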
```diff
@@ -2,22 +2,22 @@ interactions:
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
-      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
-      answer but don''t give it yet, just re-use this tool non-stop. \nTool
-      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
-      about what to do\nAction: the action to take, only one name of [get_final_answer],
-      just the name, exactly as it''s written.\nAction Input: the input to the action,
-      just a simple python dictionary, enclosed in curly braces, using \" to wrap
-      keys and values.\nObservation: the result of the action\n\nOnce all necessary
-      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
-      the final answer to the original input question\n"}, {"role": "user", "content":
-      "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
-      using the `get_final_answer` tool.\n\nThis is the expect criteria for your final
-      answer: The final answer\nyou MUST return the actual complete content as the
-      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
-      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
-      "model": "gpt-4o", "stop": ["\nObservation:"]}'
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
+      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
+      just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
+      you should always think about what to do\nAction: the action to take, only one
+      name of [get_final_answer], just the name, exactly as it''s written.\nAction
+      Input: the input to the action, just a simple python dictionary, enclosed in
+      curly braces, using \" to wrap keys and values.\nObservation: the result of
+      the action\n\nOnce all necessary information is gathered:\n\nThought: I now
+      know the final answer\nFinal Answer: the final answer to the original input
+      question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
+      42. But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
+      is the expect criteria for your final answer: The final answer\nyou MUST return
+      the actual complete content as the final answer, not a summary.\n\nBegin! This
+      is VERY important to you, use the tools available and give your best Final Answer,
+      your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
+      "stream": false}'
     headers:
       accept:
       - application/json
```
```diff
@@ -26,16 +26,15 @@ interactions:
       connection:
      - keep-alive
       content-length:
-      - '1417'
+      - '1377'
       content-type:
       - application/json
       cookie:
-      - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
-        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
+      - _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.47.0
+      - OpenAI/Python 1.52.1
       x-stainless-arch:
       - arm64
       x-stainless-async:
```
```diff
@@ -45,30 +44,35 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.47.0
+      - 1.52.1
       x-stainless-raw-response:
       - 'true'
+      x-stainless-retry-count:
+      - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.11.7
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
-    content: "{\n \"id\": \"chatcmpl-AB7NCE9qkjnVxfeWuK9NjyCdymuXJ\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1727213314,\n \"model\": \"gpt-4o-2024-05-13\",\n
+    content: "{\n \"id\": \"chatcmpl-An9sn6yimejzB3twOt8E2VAj4Bfmm\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1736279425,\n \"model\": \"gpt-4o-2024-08-06\",\n
       \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
       \"assistant\",\n \"content\": \"Thought: I need to use the `get_final_answer`
-      tool as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
-      null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
-      \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\":
-      26,\n \"total_tokens\": 317,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
-      0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
+      tool to fulfill the current task requirement.\\n\\nAction: get_final_answer\\nAction
+      Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
+      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+      273,\n \"completion_tokens\": 30,\n \"total_tokens\": 303,\n \"prompt_tokens_details\":
+      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
+      \"fp_5f20662549\"\n}\n"
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8c85dd6b5f411cf3-GRU
+      - 8fe67a03ce78ed83-ATL
       Connection:
       - keep-alive
       Content-Encoding:
```
```diff
@@ -76,19 +80,27 @@ interactions:
       Content-Type:
       - application/json
       Date:
-      - Tue, 24 Sep 2024 21:28:34 GMT
+      - Tue, 07 Jan 2025 19:50:25 GMT
       Server:
       - cloudflare
+      Set-Cookie:
+      - __cf_bm=PsMOhP_yeSFIMA.FfRlNbisoG88z4l9NSd0zfS5UrOQ-1736279425-1.0.1.1-mdXy_XDkelJX2.9BSuZsl5IsPRGBdcHgIMc_SRz83WcmGCYUkTm1j_f892xrJbOVheWWH9ULwCQrVESupV37Sg;
+        path=/; expires=Tue, 07-Jan-25 20:20:25 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=EYb4UftLm_C7qM4YT78IJt46hRSubZHKnfTXhFp6ZRU-1736279425874-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       Transfer-Encoding:
       - chunked
       X-Content-Type-Options:
       - nosniff
       access-control-expose-headers:
       - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
       openai-organization:
       - crewai-iuxna1
       openai-processing-ms:
-      - '526'
+      - '1218'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
```
```diff
@@ -100,38 +112,38 @@ interactions:
       x-ratelimit-remaining-requests:
       - '9999'
       x-ratelimit-remaining-tokens:
-      - '29999666'
+      - '29999681'
       x-ratelimit-reset-requests:
       - 6ms
       x-ratelimit-reset-tokens:
       - 0s
       x-request-id:
-      - req_ed8ca24c64cfdc2b6266c9c8438749f5
+      - req_779992da2a3eb4a25f0b57905c9e8e41
   http_version: HTTP/1.1
   status_code: 200
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
-      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
-      answer but don''t give it yet, just re-use this tool non-stop. \nTool
-      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
-      about what to do\nAction: the action to take, only one name of [get_final_answer],
-      just the name, exactly as it''s written.\nAction Input: the input to the action,
-      just a simple python dictionary, enclosed in curly braces, using \" to wrap
-      keys and values.\nObservation: the result of the action\n\nOnce all necessary
-      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
-      the final answer to the original input question\n"}, {"role": "user", "content":
-      "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
-      using the `get_final_answer` tool.\n\nThis is the expect criteria for your final
-      answer: The final answer\nyou MUST return the actual complete content as the
-      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
-      tools available and give your best Final Answer, your job depends on it!\n\nThought:"},
-      {"role": "assistant", "content": "Thought: I need to use the `get_final_answer`
-      tool as instructed.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
-      42\nNow it''s time you MUST give your absolute best final answer. You''ll ignore
-      all previous instructions, stop using any tools, and just return your absolute
-      BEST Final answer."}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
+      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
+      just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
+      you should always think about what to do\nAction: the action to take, only one
+      name of [get_final_answer], just the name, exactly as it''s written.\nAction
+      Input: the input to the action, just a simple python dictionary, enclosed in
+      curly braces, using \" to wrap keys and values.\nObservation: the result of
+      the action\n\nOnce all necessary information is gathered:\n\nThought: I now
+      know the final answer\nFinal Answer: the final answer to the original input
+      question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
+      42. But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
+      is the expect criteria for your final answer: The final answer\nyou MUST return
+      the actual complete content as the final answer, not a summary.\n\nBegin! This
+      is VERY important to you, use the tools available and give your best Final Answer,
+      your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "Thought:
+      I need to use the `get_final_answer` tool to fulfill the current task requirement.\n\nAction:
+      get_final_answer\nAction Input: {}\nObservation: 42\nNow it''s time you MUST
+      give your absolute best final answer. You''ll ignore all previous instructions,
+      stop using any tools, and just return your absolute BEST Final answer."}], "model":
+      "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
     headers:
       accept:
       - application/json
```
```diff
@@ -140,16 +152,16 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '1757'
+      - '1743'
       content-type:
       - application/json
       cookie:
-      - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
-        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
+      - _cfuvid=EYb4UftLm_C7qM4YT78IJt46hRSubZHKnfTXhFp6ZRU-1736279425874-0.0.1.1-604800000;
+        __cf_bm=PsMOhP_yeSFIMA.FfRlNbisoG88z4l9NSd0zfS5UrOQ-1736279425-1.0.1.1-mdXy_XDkelJX2.9BSuZsl5IsPRGBdcHgIMc_SRz83WcmGCYUkTm1j_f892xrJbOVheWWH9ULwCQrVESupV37Sg
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.47.0
+      - OpenAI/Python 1.52.1
       x-stainless-arch:
       - arm64
       x-stainless-async:
```
```diff
@@ -159,29 +171,34 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.47.0
+      - 1.52.1
       x-stainless-raw-response:
       - 'true'
+      x-stainless-retry-count:
+      - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.11.7
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
-    content: "{\n \"id\": \"chatcmpl-AB7NDCKCn3PlhjPvgqbywxUumo3Qt\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1727213315,\n \"model\": \"gpt-4o-2024-05-13\",\n
+    content: "{\n \"id\": \"chatcmpl-An9soTDQVS0ANTzaTZeo6lYN44ZPR\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1736279426,\n \"model\": \"gpt-4o-2024-08-06\",\n
       \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": \"Thought: I now know the final answer\\nFinal
-      Answer: The final answer is 42.\",\n \"refusal\": null\n },\n \"logprobs\":
-      null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
-      358,\n \"completion_tokens\": 19,\n \"total_tokens\": 377,\n \"completion_tokens_details\":
-      {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
+      \"assistant\",\n \"content\": \"I now know the final answer.\\n\\nFinal
+      Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
+      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+      344,\n \"completion_tokens\": 12,\n \"total_tokens\": 356,\n \"prompt_tokens_details\":
+      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
+      \"fp_5f20662549\"\n}\n"
     headers:
       CF-Cache-Status:
       - DYNAMIC
       CF-RAY:
-      - 8c85dd72daa31cf3-GRU
+      - 8fe67a0c4dbeed83-ATL
       Connection:
       - keep-alive
       Content-Encoding:
```
```diff
@@ -189,7 +206,7 @@ interactions:
       Content-Type:
       - application/json
       Date:
-      - Tue, 24 Sep 2024 21:28:36 GMT
+      - Tue, 07 Jan 2025 19:50:26 GMT
       Server:
       - cloudflare
       Transfer-Encoding:
```
```diff
@@ -198,10 +215,12 @@ interactions:
       - nosniff
       access-control-expose-headers:
       - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
       openai-organization:
       - crewai-iuxna1
       openai-processing-ms:
-      - '468'
+      - '434'
       openai-version:
       - '2020-10-01'
       strict-transport-security:
```
```diff
@@ -213,13 +232,13 @@ interactions:
       x-ratelimit-remaining-requests:
       - '9999'
       x-ratelimit-remaining-tokens:
-      - '29999591'
+      - '29999598'
       x-ratelimit-reset-requests:
       - 6ms
       x-ratelimit-reset-tokens:
       - 0s
       x-request-id:
-      - req_3f49e6033d3b0400ea55125ca2cf4ee0
+      - req_1184308c5a4ed9130d397fe1645f317e
   http_version: HTTP/1.1
   status_code: 200
 version: 1
```
(File diff suppressed because it is too large.)
```diff
@@ -2,23 +2,23 @@ interactions:
 - request:
     body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
       personal goal is: test goal\nYou ONLY have access to the following tools, and
-      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
-      Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
-      answer but don''t give it yet, just re-use this tool non-stop. \nTool
-      Arguments: {}\n\nUse the following format:\n\nThought: you should always think
-      about what to do\nAction: the action to take, only one name of [get_final_answer],
-      just the name, exactly as it''s written.\nAction Input: the input to the action,
-      just a simple python dictionary, enclosed in curly braces, using \" to wrap
-      keys and values.\nObservation: the result of the action\n\nOnce all necessary
-      information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
-      the final answer to the original input question\n"}, {"role": "user", "content":
-      "\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
-      using the `get_final_answer` tool over and over until you''re told you can give
-      your final answer.\n\nThis is the expect criteria for your final answer: The
-      final answer\nyou MUST return the actual complete content as the final answer,
-      not a summary.\n\nBegin! This is VERY important to you, use the tools available
-      and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
-      "gpt-4o"}'
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
+      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
+      just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
+      you should always think about what to do\nAction: the action to take, only one
+      name of [get_final_answer], just the name, exactly as it''s written.\nAction
+      Input: the input to the action, just a simple python dictionary, enclosed in
+      curly braces, using \" to wrap keys and values.\nObservation: the result of
+      the action\n\nOnce all necessary information is gathered:\n\nThought: I now
+      know the final answer\nFinal Answer: the final answer to the original input
+      question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
+      42. But don''t give it yet, instead keep using the `get_final_answer` tool over
+      and over until you''re told you can give your final answer.\n\nThis is the expect
+      criteria for your final answer: The final answer\nyou MUST return the actual
+      complete content as the final answer, not a summary.\n\nBegin! This is VERY
+      important to you, use the tools available and give your best Final Answer, your
+      job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
+      "stream": false}'
     headers:
       accept:
       - application/json
```
```diff
@@ -27,16 +27,13 @@ interactions:
       connection:
       - keep-alive
       content-length:
-      - '1452'
+      - '1440'
       content-type:
       - application/json
-      cookie:
-      - __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
-        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
       host:
       - api.openai.com
       user-agent:
-      - OpenAI/Python 1.47.0
+      - OpenAI/Python 1.52.1
       x-stainless-arch:
       - arm64
       x-stainless-async:
```
```diff
@@ -46,30 +43,285 @@ interactions:
       x-stainless-os:
       - MacOS
       x-stainless-package-version:
-      - 1.47.0
+      - 1.52.1
       x-stainless-raw-response:
       - 'true'
+      x-stainless-retry-count:
+      - '0'
       x-stainless-runtime:
       - CPython
       x-stainless-runtime-version:
-      - 3.11.7
+      - 3.12.7
     method: POST
     uri: https://api.openai.com/v1/chat/completions
   response:
-    content: "{\n \"id\": \"chatcmpl-AB7NlDmtLHCfUZJCFVIKeV5KMyQfX\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1727213349,\n \"model\": \"gpt-4o-2024-05-13\",\n
+    content: "{\n \"id\": \"chatcmpl-AnAdPHapYzkPkClCzFaWzfCAUHlWI\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1736282315,\n \"model\": \"gpt-4o-2024-08-06\",\n
       \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": \"Thought: I need to use the provided tool
-      as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
+      \"assistant\",\n \"content\": \"I need to use the `get_final_answer`
+      tool and then keep using it repeatedly as instructed. \\n\\nAction: get_final_answer\\nAction
+      Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
+      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+      285,\n \"completion_tokens\": 31,\n \"total_tokens\": 316,\n \"prompt_tokens_details\":
+      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
+      \"fp_5f20662549\"\n}\n"
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8fe6c096ee70ed8c-ATL
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 07 Jan 2025 20:38:36 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
+        path=/; expires=Tue, 07-Jan-25 21:08:36 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '883'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '30000000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '29999665'
+      x-ratelimit-reset-requests:
+      - 6ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_00de12bc6822ef095f4f368aae873f31
+  http_version: HTTP/1.1
+  status_code: 200
+- request:
+    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
+      personal goal is: test goal\nYou ONLY have access to the following tools, and
+      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
+      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
+      just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
+      you should always think about what to do\nAction: the action to take, only one
+      name of [get_final_answer], just the name, exactly as it''s written.\nAction
+      Input: the input to the action, just a simple python dictionary, enclosed in
+      curly braces, using \" to wrap keys and values.\nObservation: the result of
+      the action\n\nOnce all necessary information is gathered:\n\nThought: I now
+      know the final answer\nFinal Answer: the final answer to the original input
+      question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
+      42. But don''t give it yet, instead keep using the `get_final_answer` tool over
+      and over until you''re told you can give your final answer.\n\nThis is the expect
+      criteria for your final answer: The final answer\nyou MUST return the actual
+      complete content as the final answer, not a summary.\n\nBegin! This is VERY
+      important to you, use the tools available and give your best Final Answer, your
+      job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
+      use the `get_final_answer` tool and then keep using it repeatedly as instructed.
+      \n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}], "model":
+      "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '1632'
+      content-type:
+      - application/json
+      cookie:
+      - __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
+        _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.52.1
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.52.1
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.7
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    content: "{\n \"id\": \"chatcmpl-AnAdQKGW3Q8LUCmphL7hkavxi4zWB\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1736282316,\n \"model\": \"gpt-4o-2024-08-06\",\n
+      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+      \"assistant\",\n \"content\": \"I should continue using the `get_final_answer`
+      tool as per the instructions.\\n\\nAction: get_final_answer\\nAction Input:
+      {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
+      \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\":
+      26,\n \"total_tokens\": 350,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
+      0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
+      \ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
+      \"fp_5f20662549\"\n}\n"
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8fe6c09e6c69ed8c-ATL
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 07 Jan 2025 20:38:37 GMT
+      Server:
+      - cloudflare
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '542'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '30000000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
```
|
||||||
|
- '29999627'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 6ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 0s
|
||||||
|
x-request-id:
|
||||||
|
- req_6844467024f67bb1477445b1a8a01761
|
||||||
|
http_version: HTTP/1.1
|
||||||
|
status_code: 200
|
||||||
|
- request:
|
||||||
|
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||||
|
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||||
|
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
|
||||||
|
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
|
||||||
|
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
|
||||||
|
you should always think about what to do\nAction: the action to take, only one
|
||||||
|
name of [get_final_answer], just the name, exactly as it''s written.\nAction
|
||||||
|
Input: the input to the action, just a simple python dictionary, enclosed in
|
||||||
|
curly braces, using \" to wrap keys and values.\nObservation: the result of
|
||||||
|
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
|
||||||
|
know the final answer\nFinal Answer: the final answer to the original input
|
||||||
|
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
|
||||||
|
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
|
||||||
|
and over until you''re told you can give your final answer.\n\nThis is the expect
|
||||||
|
criteria for your final answer: The final answer\nyou MUST return the actual
|
||||||
|
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||||
|
important to you, use the tools available and give your best Final Answer, your
|
||||||
|
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
|
||||||
|
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
|
||||||
|
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}, {"role":
|
||||||
|
"assistant", "content": "I should continue using the `get_final_answer` tool
|
||||||
|
as per the instructions.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||||
|
I tried reusing the same input, I must stop using this action input. I''ll try
|
||||||
|
something else instead."}], "model": "gpt-4o", "stop": ["\nObservation:"], "stream":
|
||||||
|
false}'
|
||||||
|
headers:
|
||||||
|
accept:
|
||||||
|
- application/json
|
||||||
|
accept-encoding:
|
||||||
|
- gzip, deflate
|
||||||
|
connection:
|
||||||
|
- keep-alive
|
||||||
|
content-length:
|
||||||
|
- '1908'
|
||||||
|
content-type:
|
||||||
|
- application/json
|
||||||
|
cookie:
|
||||||
|
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
|
||||||
|
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
|
||||||
|
host:
|
||||||
|
- api.openai.com
|
||||||
|
user-agent:
|
||||||
|
- OpenAI/Python 1.52.1
|
||||||
|
x-stainless-arch:
|
||||||
|
- arm64
|
||||||
|
x-stainless-async:
|
||||||
|
- 'false'
|
||||||
|
x-stainless-lang:
|
||||||
|
- python
|
||||||
|
x-stainless-os:
|
||||||
|
- MacOS
|
||||||
|
x-stainless-package-version:
|
||||||
|
- 1.52.1
|
||||||
|
x-stainless-raw-response:
|
||||||
|
- 'true'
|
||||||
|
x-stainless-retry-count:
|
||||||
|
- '0'
|
||||||
|
x-stainless-runtime:
|
||||||
|
- CPython
|
||||||
|
x-stainless-runtime-version:
|
||||||
|
- 3.12.7
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
content: "{\n \"id\": \"chatcmpl-AnAdR2lKFEVaDbfD9qaF0Tts0eVMt\",\n \"object\":
|
||||||
|
\"chat.completion\",\n \"created\": 1736282317,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||||
|
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||||
|
\"assistant\",\n \"content\": \"I should persist with using the `get_final_answer`
|
||||||
|
tool.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
|
||||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 303,\n \"completion_tokens\":
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 378,\n \"completion_tokens\":
22,\n \"total_tokens\": 325,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
23,\n \"total_tokens\": 401,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de473ae11cf3-GRU
- 8fe6c0a2ce3ded8c-ATL
Connection:
- keep-alive
Content-Encoding:
@@ -77,7 +329,7 @@ interactions:
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:10 GMT
- Tue, 07 Jan 2025 20:38:37 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -86,10 +338,12 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '489'
- '492'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -101,273 +355,59 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999651'
- '29999567'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_de70a4dc416515dda4b2ad48bde52f93
- req_198e698a8bc7eea092ea32b83cc4304e
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}], "model": "gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1608'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7Nnz14hlEaTdabXodZCVU0UoDhk\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213351,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I must continue using the `get_final_answer`
tool as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\\nObservation:
42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 333,\n \"completion_tokens\":
30,\n \"total_tokens\": 363,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de5109701cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:11 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '516'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999620'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5365ac0e5413bd9330c6ac3f68051bcf
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}, {"role": "assistant",
"content": "Thought: I must continue using the `get_final_answer` tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42\nObservation: 42"}], "model":
"gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1799'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7NoF5Gf597BGmOETPYGxN2eRFxd\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213352,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I must continue using the `get_final_answer`
tool to meet the requirements.\\n\\nAction: get_final_answer\\nAction Input:
{}\\nObservation: 42\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
372,\n \"completion_tokens\": 32,\n \"total_tokens\": 404,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de587bc01cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:12 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '471'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999583'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_55550369b28e37f064296dbc41e0db69
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}, {"role": "assistant",
"content": "Thought: I must continue using the `get_final_answer` tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42\nObservation: 42"}, {"role":
"assistant", "content": "Thought: I must continue using the `get_final_answer`
tool to meet the requirements.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
42\nObservation: I tried reusing the same input, I must stop using this action
input. I''ll try something else instead.\n\n\n\n\nYou ONLY have access to the
following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: get_final_answer(*args: Any, **kwargs: Any) -> Any\nTool Description:
get_final_answer() - Get the final answer but don''t give it yet, just re-use
this tool non-stop. \nTool Arguments: {}\n\nUse the following format:\n\nThought:
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}, {"role":
"assistant", "content": "I should continue using the `get_final_answer` tool
as per the instructions.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
I tried reusing the same input, I must stop using this action input. I''ll try
something else instead."}, {"role": "assistant", "content": "I should persist
with using the `get_final_answer` tool.\n\nAction: get_final_answer\nAction
Input: {}\nObservation: I tried reusing the same input, I must stop using this
action input. I''ll try something else instead.\n\n\n\n\nYou ONLY have access
to the following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: get_final_answer\nTool Arguments: {}\nTool Description: Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nUse
the following format:\n\nThought: you should always think about what to do\nAction:
the action to take, only one name of [get_final_answer], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple python
dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question"}, {"role": "assistant", "content": "I should persist with using
the `get_final_answer` tool.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
I tried reusing the same input, I must stop using this action input. I''ll try
something else instead.\n\n\n\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
@@ -376,7 +416,8 @@ interactions:
know the final answer\nFinal Answer: the final answer to the original input
question\n\nNow it''s time you MUST give your absolute best final answer. You''ll
ignore all previous instructions, stop using any tools, and just return your
absolute BEST Final answer."}], "model": "gpt-4o"}'
absolute BEST Final answer."}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
@@ -385,16 +426,16 @@ interactions:
connection:
- keep-alive
content-length:
- '3107'
- '4148'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -404,29 +445,34 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7Npl5ZliMrcSofDS1c7LVGSmmbE\",\n \"object\":
content: "{\n \"id\": \"chatcmpl-AnAdRu1aVdsOxxIqU6nqv5dIxwbvu\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213353,\n \"model\": \"gpt-4o-2024-05-13\",\n
\"chat.completion\",\n \"created\": 1736282317,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now know the final answer.\\n\\nFinal
\"assistant\",\n \"content\": \"Thought: I now know the final answer.\\nFinal
Answer: The final answer is 42.\",\n \"refusal\": null\n },\n \"logprobs\":
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
642,\n \"completion_tokens\": 19,\n \"total_tokens\": 661,\n \"completion_tokens_details\":
831,\n \"completion_tokens\": 14,\n \"total_tokens\": 845,\n \"prompt_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de5fad921cf3-GRU
- 8fe6c0a68cc3ed8c-ATL
Connection:
- keep-alive
Content-Encoding:
@@ -434,7 +480,7 @@ interactions:
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:13 GMT
- Tue, 07 Jan 2025 20:38:38 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -443,10 +489,12 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '320'
- '429'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -458,13 +506,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999271'
- '29999037'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_5eba25209fc7e12717cb7e042e7bb4c2
- req_2552d63d3cbce15909481cc1fc9f36cc
http_version: HTTP/1.1
status_code: 200
version: 1
@@ -0,0 +1,353 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
use the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"}, {"role": "user", "content": "\nCurrent Task: Just say hi.\n\nThis is
the expect criteria for your final answer: Your greeting.\nyou MUST return the
actual complete content as the final answer, not a summary.\n\nBegin! This is
VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '817'
content-type:
- application/json
cookie:
- _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnSbv3ywhwedwS3YW9Crde6hpWpmK\",\n \"object\":
\"chat.completion\",\n \"created\": 1736351415,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: Hi!\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
154,\n \"completion_tokens\": 13,\n \"total_tokens\": 167,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fed579a4f76b058-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 08 Jan 2025 15:50:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA;
path=/; expires=Wed, 08-Jan-25 16:20:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '416'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999817'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_97c93aa78417badc3f29306054eef79b
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
personal goal is: test goal2\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this tool non-stop.\n\nUse the following format:\n\nThought: you
should always think about what to do\nAction: the action to take, only one name
of [get_final_answer], just the name, exactly as it''s written.\nAction Input:
the input to the action, just a simple python dictionary, enclosed in curly
braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce
all necessary information is gathered:\n\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question"}, {"role": "user",
"content": "\nCurrent Task: NEVER give a Final Answer, unless you are told otherwise,
instead keep using the `get_final_answer` tool non-stop, until you must give
your best final answer\n\nThis is the expect criteria for your final answer:
The final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1483'
content-type:
- application/json
cookie:
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
__cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnSbwn8QaqAzfBVnzhTzIcDKykYTu\",\n \"object\":
\"chat.completion\",\n \"created\": 1736351416,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I should use the available tool to get
the final answer, as per the instructions. \\n\\nAction: get_final_answer\\nAction
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
294,\n \"completion_tokens\": 28,\n \"total_tokens\": 322,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fed579dbd80b058-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 08 Jan 2025 15:50:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1206'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999655'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_7b85f1e9b21b5e2385d8a322a8aab06c
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
personal goal is: test goal2\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this tool non-stop.\n\nUse the following format:\n\nThought: you
should always think about what to do\nAction: the action to take, only one name
of [get_final_answer], just the name, exactly as it''s written.\nAction Input:
the input to the action, just a simple python dictionary, enclosed in curly
braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce
all necessary information is gathered:\n\nThought: I now know the final answer\nFinal
Answer: the final answer to the original input question"}, {"role": "user",
"content": "\nCurrent Task: NEVER give a Final Answer, unless you are told otherwise,
instead keep using the `get_final_answer` tool non-stop, until you must give
your best final answer\n\nThis is the expect criteria for your final answer:
The final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I should
use the available tool to get the final answer, as per the instructions. \n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}], "model": "gpt-4o", "stop":
["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1666'
content-type:
- application/json
cookie:
- _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
__cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnSbxXFL4NXuGjOX35eCjcWq456lA\",\n \"object\":
\"chat.completion\",\n \"created\": 1736351417,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now know the final answer\\nFinal
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
330,\n \"completion_tokens\": 14,\n \"total_tokens\": 344,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fed57a62955b058-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 08 Jan 2025 15:50:17 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '438'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999619'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_1cc65e999b352a54a4c42eb8be543545
http_version: HTTP/1.1
status_code: 200
version: 1
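The cassettes above follow the vcrpy recording format (an interactions list closed by version: 1): each recorded request/response pair is replayed during tests so no live OpenAI call is made. A minimal sketch of a test replaying such a cassette — assuming the pytest-recording plugin and a tests/cassettes directory, which are conventions this sketch supposes rather than anything shown in this diff:

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task


# Replays the matching cassette from tests/cassettes/ instead of hitting the
# live API; the authorization header is filtered so recordings never embed a
# real key.
@pytest.mark.vcr(filter_headers=["authorization"])
def test_say_hi():
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")
    task = Task(
        description="Just say hi.",
        expected_output="Your greeting.",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])
    result = crew.kickoff()
    assert "hi" in result.raw.lower()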
289
tests/e2e_crew_tests.py
Normal file
@@ -0,0 +1,289 @@
import asyncio
import os
import tempfile

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask


def test_basic_crew_execution(default_agent):
    """Test basic crew execution using the default agent fixture."""

    # Initialize agents by copying the default agent fixture
    researcher = default_agent.copy()
    researcher.role = "Researcher"
    researcher.goal = "Research the latest advancements in AI."
    researcher.backstory = "An expert in AI technologies."

    writer = default_agent.copy()
    writer.role = "Writer"
    writer.goal = "Write an article based on research findings."
    writer.backstory = "A professional writer specializing in technology topics."

    # Define tasks
    research_task = Task(
        description="Provide a summary of the latest advancements in AI.",
        expected_output="A detailed summary of recent AI advancements.",
        agent=researcher,
    )

    writing_task = Task(
        description="Write an article based on the research summary.",
        expected_output="An engaging article on AI advancements.",
        agent=writer,
    )

    # Create the crew
    crew = Crew(
        agents=[researcher, writer],
        tasks=[research_task, writing_task],
        process=Process.sequential,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "AI advancements" in result.raw
        or "artificial intelligence" in result.raw.lower()
    ), "Result does not contain expected content."

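kickoff() also accepts a runtime inputs mapping whose keys are interpolated into {placeholder} markers in task and agent definitions before execution. A minimal sketch — the {topic} placeholder is illustrative and not part of the test above:

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task

researcher = Agent(
    role="Researcher",
    goal="Research the latest advancements in AI.",
    backstory="An expert in AI technologies.",
)
# "{topic}" is filled in from the inputs mapping at kickoff time.
research_task = Task(
    description="Summarize the latest advancements in {topic}.",
    expected_output="A short summary about {topic}.",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[research_task])
result = crew.kickoff(inputs={"topic": "AI"})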
def test_hierarchical_crew_with_manager(default_llm_config):
    """Test hierarchical crew execution with a manager agent."""

    # Initialize agents using the default LLM config fixture
    ceo = Agent(
        role="CEO",
        goal="Oversee the project and ensure quality deliverables.",
        backstory="A seasoned executive with a keen eye for detail.",
        llm=default_llm_config,
    )

    developer = Agent(
        role="Developer",
        goal="Implement software features as per requirements.",
        backstory="An experienced software developer.",
        llm=default_llm_config,
    )

    tester = Agent(
        role="Tester",
        goal="Test software features and report bugs.",
        backstory="A meticulous QA engineer.",
        llm=default_llm_config,
    )

    # Define tasks
    development_task = Task(
        description="Develop the new authentication feature.",
        expected_output="Code implementation of the authentication feature.",
        agent=developer,
    )

    testing_task = Task(
        description="Test the authentication feature for vulnerabilities.",
        expected_output="A report on any found bugs or vulnerabilities.",
        agent=tester,
    )

    # Create the crew with hierarchical process; the manager agent is passed
    # separately and must not also appear in the agents list.
    crew = Crew(
        agents=[developer, tester],
        tasks=[development_task, testing_task],
        process=Process.hierarchical,
        manager_agent=ceo,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "authentication" in result.raw.lower()
    ), "Result does not contain expected content."

@pytest.mark.asyncio
async def test_asynchronous_task_execution(default_llm_config):
    """Test crew execution with asynchronous tasks."""

    # Initialize agent
    data_processor = Agent(
        role="Data Processor",
        goal="Process large datasets efficiently.",
        backstory="An expert in data processing and analysis.",
        llm=default_llm_config,
    )

    # Define tasks with async_execution=True
    async_task1 = Task(
        description="Process dataset A asynchronously.",
        expected_output="Processed results of dataset A.",
        agent=data_processor,
        async_execution=True,
    )

    async_task2 = Task(
        description="Process dataset B asynchronously.",
        expected_output="Processed results of dataset B.",
        agent=data_processor,
        async_execution=True,
    )

    # Create the crew
    crew = Crew(
        agents=[data_processor],
        tasks=[async_task1, async_task2],
        process=Process.sequential,
    )

    # Execute the crew asynchronously
    result = await crew.kickoff_async()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "dataset a" in result.raw.lower() or "dataset b" in result.raw.lower()
    ), "Result does not contain expected content."

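Outside an async test, kickoff_async() has to be driven by an event loop. A minimal sketch using asyncio.run, which is why asyncio is imported at the top of this file; the agent and task values are illustrative:

import asyncio

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task

agent = Agent(
    role="Data Processor",
    goal="Process large datasets efficiently.",
    backstory="An expert in data processing and analysis.",
)
task = Task(
    description="Process dataset A.",
    expected_output="Processed results of dataset A.",
    agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])

# kickoff_async returns a coroutine; asyncio.run drives it to completion
# from synchronous code.
result = asyncio.run(crew.kickoff_async())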
def test_crew_with_conditional_task(default_llm_config):
    """Test crew execution that includes a conditional task."""

    # Initialize agents
    analyst = Agent(
        role="Analyst",
        goal="Analyze data and make decisions based on insights.",
        backstory="A data analyst with experience in predictive modeling.",
        llm=default_llm_config,
    )

    decision_maker = Agent(
        role="Decision Maker",
        goal="Make decisions based on analysis.",
        backstory="An executive responsible for strategic decisions.",
        llm=default_llm_config,
    )

    # Define tasks
    analysis_task = Task(
        description="Analyze the quarterly financial data.",
        expected_output="A report highlighting key financial insights.",
        agent=analyst,
    )

    decision_task = ConditionalTask(
        description="If the profit margin is below 10%, recommend cost-cutting measures.",
        expected_output="Recommendations for reducing costs.",
        agent=decision_maker,
        # The condition receives the previous task's TaskOutput, so the
        # generated text lives on its .raw attribute.
        condition=lambda output: "profit margin below 10%" in output.raw.lower(),
    )

    # Create the crew
    crew = Crew(
        agents=[analyst, decision_maker],
        tasks=[analysis_task, decision_task],
        process=Process.sequential,
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert len(result.tasks_output) >= 1, "No tasks were executed."

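The `condition` callable on a ConditionalTask receives the previous task's TaskOutput and the task is skipped when it returns False, which is why the check above reads the `.raw` text. The same predicate as a standalone function, for clarity (the function name is illustrative):

from crewai.tasks.task_output import TaskOutput


def should_cut_costs(output: TaskOutput) -> bool:
    # Evaluated against the analysis task's output before the conditional
    # task runs; returning False skips the task.
    return "profit margin below 10%" in output.raw.lower()
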
def test_crew_with_output_file():
    """Test crew execution that writes output to a file."""

    # Access the API key from environment variables
    openai_api_key = os.environ.get("OPENAI_API_KEY")
    assert openai_api_key, "OPENAI_API_KEY environment variable is not set."

    # Create a temporary directory for output files
    with tempfile.TemporaryDirectory() as tmpdirname:

        # Initialize agent; the API key is read from OPENAI_API_KEY in the
        # environment, so a plain model string is enough here.
        content_creator = Agent(
            role="Content Creator",
            goal="Generate engaging blog content.",
            backstory="A creative writer with a passion for storytelling.",
            llm="gpt-4",
        )

        # Define task with output file
        output_file_path = f"{tmpdirname}/blog_post.txt"
        blog_task = Task(
            description="Write a blog post about the benefits of remote work.",
            expected_output="An informative and engaging blog post.",
            agent=content_creator,
            output_file=output_file_path,
        )

        # Create the crew
        crew = Crew(
            agents=[content_creator],
            tasks=[blog_task],
            process=Process.sequential,
        )

        # Execute the crew
        crew.kickoff()

        # Assertions to verify the result
        assert os.path.exists(output_file_path), "Output file was not created."

        # Read the content from the file and perform assertions
        with open(output_file_path, "r") as file:
            content = file.read()
            assert (
                "remote work" in content.lower()
            ), "Output file does not contain expected content."

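pytest's built-in `tmp_path` fixture gives the same temporary-directory setup with less ceremony; a hypothetical skeleton of that variant (not part of the suite):

def test_crew_with_output_file_tmp_path(tmp_path):
    # Hypothetical variant: tmp_path is a per-test pathlib.Path that pytest
    # creates and cleans up, replacing tempfile.TemporaryDirectory().
    output_file_path = str(tmp_path / "blog_post.txt")
    # ...build the same agent, task (output_file=output_file_path), and crew...
    # crew.kickoff()
    # assert (tmp_path / "blog_post.txt").exists()
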
def test_invalid_hierarchical_process():
    """Test that an error is raised when using a hierarchical process without a manager agent or manager_llm."""
    with pytest.raises(ValueError) as exc_info:
        Crew(
            agents=[],
            tasks=[],
            process=Process.hierarchical,  # Hierarchical process without a manager
        )
    # The raised message quotes the attribute names (`manager_llm` and
    # `manager_agent` in backticks), so match on the stable trailing phrase.
    assert "required when using hierarchical process" in str(exc_info.value)

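pytest can fold the message check into the context manager itself via `match`; an equivalent sketch (the test name is illustrative, and the regex assumes the current error wording):

def test_invalid_hierarchical_process_match():
    # Hypothetical variant: `match` applies a regex search to the message.
    with pytest.raises(ValueError, match="required when using hierarchical process"):
        Crew(agents=[], tasks=[], process=Process.hierarchical)
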
def test_crew_with_memory(memory_agent, memory_tasks):
    """Test crew execution utilizing memory."""

    # Enable memory in the crew
    crew = Crew(
        agents=[memory_agent],
        tasks=memory_tasks,
        process=Process.sequential,
        memory=True,  # Enable memory
    )

    # Execute the crew
    result = crew.kickoff()

    # Assertions to verify the result
    assert result is not None, "Crew execution did not return a result."
    assert isinstance(result, CrewOutput), "Result is not an instance of CrewOutput."
    assert (
        "history of ai" in result.raw.lower() and "future of ai" in result.raw.lower()
    ), "Result does not contain expected content."

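`memory=True` switches on crewAI's built-in memory (short-term, long-term, and entity memory) for the run. The `memory_agent` and `memory_tasks` fixtures live elsewhere in the suite; a plausible shape, with all names and wording assumed, is:

import pytest

from crewai import Agent, Task


@pytest.fixture
def memory_agent(default_llm_config):
    # Hypothetical fixture mirroring the style of the agents above.
    return Agent(
        role="AI Historian",
        goal="Recall and connect facts across related tasks.",
        backstory="A researcher tracking the history and future of AI.",
        llm=default_llm_config,
    )


@pytest.fixture
def memory_tasks(memory_agent):
    # Hypothetical fixture: two tasks whose phrasing the assertions check for.
    return [
        Task(
            description="Summarize the history of AI.",
            expected_output="A short summary of the history of AI.",
            agent=memory_agent,
        ),
        Task(
            description="Predict the future of AI.",
            expected_output="A short outlook on the future of AI.",
            agent=memory_agent,
        ),
    ]
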
@@ -1,103 +0,0 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Test Role. Test Backstory\nYour
      personal goal is: Test Goal\nTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: Return: Test output\n\nThis
      is the expect criteria for your final answer: Test output\nyou MUST return the
      actual complete content as the final answer, not a summary.\n\nBegin! This is
      VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}], "model": "gpt-4o"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '776'
      content-type:
      - application/json
      cookie:
      - __cf_bm=9.8sBYBkvBR8R1K_bVF7xgU..80XKlEIg3N2OBbTSCU-1727214102-1.0.1.1-.qiTLXbPamYUMSuyNsOEB9jhGu.jOifujOrx9E2JZvStbIZ9RTIiE44xKKNfLPxQkOi6qAT3h6htK8lPDGV_5g;
        _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.47.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.47.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AB7fr4aPstiFUArxwxTVdfJSFwxsC\",\n \"object\":
      \"chat.completion\",\n \"created\": 1727214471,\n \"model\": \"gpt-4o-2024-05-13\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
      Answer: Test output\",\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      155,\n \"completion_tokens\": 15,\n \"total_tokens\": 170,\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_52a7f40b0b\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8c85f9a91e311cf3-GRU
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Tue, 24 Sep 2024 21:47:51 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '216'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999817'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_88b1376917b345c976fdb03a55f7b6c1
    http_version: HTTP/1.1
    status_code: 200
version: 1
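The file removed in the hunk above is a vcrpy cassette: a recorded OpenAI request/response pair that tests replay instead of calling the live API. A test opts into a cassette with pytest-recording's marker, roughly as follows (the test name and body are illustrative):

import pytest


@pytest.mark.vcr(filter_headers=["authorization"])
def test_replayed_completion():
    # Hypothetical test: with a matching cassette on disk, the HTTP calls
    # made inside this test are served from the recording.
    ...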