Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-26 01:08:29 +00:00
Compare commits
6 Commits
bug/fix-sh...feat/impro
| Author | SHA1 | Date |
|---|---|---|
|  | 5dc8dd0e8a |  |
|  | b8d07fee83 |  |
|  | be8e33daf6 |  |
|  | efc8323c63 |  |
|  | 831951efc4 |  |
|  | 2131b94ddb |  |
@@ -31,7 +31,7 @@ From this point on, your crew will have planning enabled, and the tasks will be
 #### Planning LLM
 
-Now you can define the LLM that will be used to plan the tasks. You can use any ChatOpenAI LLM model available.
+Now you can define the LLM that will be used to plan the tasks.
 
 When running the base case example, you will see something like the output below, which represents the output of the `AgentPlanner`
 responsible for creating the step-by-step logic to add to the Agents' tasks.
@@ -39,7 +39,6 @@ responsible for creating the step-by-step logic to add to the Agents' tasks.
 <CodeGroup>
 ```python Code
 from crewai import Crew, Agent, Task, Process
-from langchain_openai import ChatOpenAI
 
 # Assemble your crew with planning capabilities and custom LLM
 my_crew = Crew(
@@ -47,7 +46,7 @@ my_crew = Crew(
     tasks=self.tasks,
     process=Process.sequential,
     planning=True,
-    planning_llm=ChatOpenAI(model="gpt-4o")
+    planning_llm="gpt-4o"
 )
 
 # Run the crew
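For readers skimming the diff, the post-change example reads as follows when assembled into one self-contained script (an illustrative sketch: in the docs this snippet lives inside a crew class, so `self.agents` and `self.tasks` already exist; the standalone agent and task below are hypothetical stand-ins):

```python
from crewai import Agent, Crew, Process, Task

# Hypothetical agent and task, standing in for self.agents / self.tasks
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An experienced analyst.",
)
summary_task = Task(
    description="Summarize the topic of AI planning.",
    expected_output="A short summary.",
    agent=researcher,
)

# planning_llm now takes a plain model string instead of a ChatOpenAI instance
my_crew = Crew(
    agents=[researcher],
    tasks=[summary_task],
    process=Process.sequential,
    planning=True,
    planning_llm="gpt-4o",
)

result = my_crew.kickoff()
```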
@@ -23,9 +23,7 @@ Processes enable individual agents to operate as a cohesive unit, streamlining t
 To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. For a hierarchical process, ensure to define `manager_llm` or `manager_agent` for the manager agent.
 
 ```python
-from crewai import Crew
-from crewai.process import Process
-from langchain_openai import ChatOpenAI
+from crewai import Crew, Process
 
 # Example: Creating a crew with a sequential process
 crew = Crew(
@@ -40,7 +38,7 @@ crew = Crew(
     agents=my_agents,
     tasks=my_tasks,
     process=Process.hierarchical,
-    manager_llm=ChatOpenAI(model="gpt-4")
+    manager_llm="gpt-4o"
     # or
     # manager_agent=my_manager_agent
 )
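The commented-out `manager_agent` alternative above is not expanded in the docs; a minimal sketch of that path, assuming `my_agents` and `my_tasks` are existing lists of `Agent` and `Task` objects:

```python
from crewai import Agent, Crew, Process

# A custom manager agent replaces manager_llm in the hierarchical process
manager = Agent(
    role="Project Manager",
    goal="Coordinate the crew and validate the result of each task",
    backstory="An experienced coordinator who delegates work rather than doing it.",
    allow_delegation=True,
)

crew = Crew(
    agents=my_agents,  # the manager is passed separately, not in this list
    tasks=my_tasks,
    process=Process.hierarchical,
    manager_agent=manager,
)
```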
@@ -150,15 +150,20 @@ There are two main ways for one to create a CrewAI tool:
 
 ```python Code
 from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
 
+class MyToolInput(BaseModel):
+    """Input schema for MyCustomTool."""
+    argument: str = Field(..., description="Description of the argument.")
+
 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
-    description: str = "Clear description for what this tool is useful for, your agent will need this information to use it."
+    description: str = "What this tool does. It's vital for effective utilization."
+    args_schema: Type[BaseModel] = MyToolInput
 
     def _run(self, argument: str) -> str:
-        # Implementation goes here
-        return "Result from custom tool"
+        # Your tool's logic here
+        return "Tool's result"
 ```
 
 ### Utilizing the `tool` Decorator
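To show how such a subclassed tool is wired into an agent (a sketch: note the docs snippet above assumes `from typing import Type` for the `args_schema` annotation, and the agent fields here are illustrative):

```python
from typing import Type

from crewai import Agent
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "What this tool does. It's vital for effective utilization."
    args_schema: Type[BaseModel] = MyToolInput

    def _run(self, argument: str) -> str:
        # Your tool's logic here
        return "Tool's result"


# Hand the tool to an agent; the agent reads name/description/args_schema
# to decide when and how to call it.
analyst = Agent(
    role="Analyst",
    goal="Use custom tools to answer questions",
    backstory="Knows the in-house tooling.",
    tools=[MyCustomTool()],
)
```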
@@ -73,9 +73,9 @@ result = crew.kickoff()
 If you're using the hierarchical process and don't want to set a custom manager agent, you can specify the language model for the manager:
 
 ```python Code
-from langchain_openai import ChatOpenAI
+from crewai import LLM
 
-manager_llm = ChatOpenAI(model_name="gpt-4")
+manager_llm = LLM(model="gpt-4o")
 
 crew = Crew(
     agents=[researcher, writer],
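Unlike the plain model string, the `LLM` class also accepts provider parameters directly (a sketch; the parameter values are illustrative):

```python
from crewai import LLM

# Equivalent to manager_llm="gpt-4o", but with explicit generation settings
manager_llm = LLM(
    model="gpt-4o",
    temperature=0.2,  # keep the manager's delegation decisions conservative
    max_tokens=2048,
)
```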
@@ -301,38 +301,166 @@ Use the annotations to properly reference the agent and task in the `crew.py` fi
 
 ### Annotations include:
 
 * `@agent`
 * `@task`
 * `@crew`
 * `@tool`
+* `@before_kickoff`
+* `@after_kickoff`
 * `@callback`
 * `@output_json`
 * `@output_pydantic`
 * `@cache_handler`
 
 Here are examples of how to use each annotation in your CrewAI project, and when you should use them:
 
-```python crew.py
-# ...
-@agent
-def email_summarizer(self) -> Agent:
-    return Agent(
-        config=self.agents_config["email_summarizer"],
-    )
-
-@task
-def email_summarizer_task(self) -> Task:
-    return Task(
-        config=self.tasks_config["email_summarizer_task"],
-    )
-# ...
-```
-
-<Tip>
-In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
-which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
-You can learn more about the core concepts [here](/concepts).
-</Tip>
+#### @agent
+Used to define an agent in your crew. Use this when:
+- You need to create a specialized AI agent with a specific role
+- You want the agent to be automatically collected and managed by the crew
+- You need to reuse the same agent configuration across multiple tasks
+
+```python
+@agent
+def research_agent(self) -> Agent:
+    return Agent(
+        role="Research Analyst",
+        goal="Conduct thorough research on given topics",
+        backstory="Expert researcher with years of experience in data analysis",
+        tools=[SerperDevTool()],
+        verbose=True
+    )
+```
+
+#### @task
+Used to define a task that can be executed by agents. Use this when:
+- You need to define a specific piece of work for an agent
+- You want tasks to be automatically sequenced and managed
+- You need to establish dependencies between different tasks
+
+```python
+@task
+def research_task(self) -> Task:
+    return Task(
+        description="Research the latest developments in AI technology",
+        expected_output="A comprehensive report on AI advancements",
+        agent=self.research_agent(),
+        output_file="output/research.md"
+    )
+```
+
+#### @crew
+Used to define your crew configuration. Use this when:
+- You want to automatically collect all @agent and @task definitions
+- You need to specify how tasks should be processed (sequential or hierarchical)
+- You want to set up crew-wide configurations
+
+```python
+@crew
+def research_crew(self) -> Crew:
+    return Crew(
+        agents=self.agents,  # Automatically collected from @agent methods
+        tasks=self.tasks,  # Automatically collected from @task methods
+        process=Process.sequential,
+        verbose=True
+    )
+```
+
+#### @tool
+Used to create custom tools for your agents. Use this when:
+- You need to give agents specific capabilities (like web search, data analysis)
+- You want to encapsulate external API calls or complex operations
+- You need to share functionality across multiple agents
+
+```python
+@tool
+def web_search_tool(query: str, max_results: int = 5) -> list[str]:
+    """
+    Search the web for information.
+
+    Args:
+        query: The search query
+        max_results: Maximum number of results to return
+
+    Returns:
+        List of search results
+    """
+    # Implement your search logic here
+    return [f"Result {i} for: {query}" for i in range(max_results)]
+```
+
+#### @before_kickoff
+Used to execute logic before the crew starts. Use this when:
+- You need to validate or preprocess input data
+- You want to set up resources or configurations before execution
+- You need to perform any initialization logic
+
+```python
+@before_kickoff
+def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+    """Validate and preprocess inputs before the crew starts."""
+    if inputs is None:
+        return None
+
+    if 'topic' not in inputs:
+        raise ValueError("Topic is required")
+
+    # Add additional context
+    inputs['timestamp'] = datetime.now().isoformat()
+    inputs['topic'] = inputs['topic'].strip().lower()
+    return inputs
+```
+
+#### @after_kickoff
+Used to process results after the crew completes. Use this when:
+- You need to format or transform the final output
+- You want to perform cleanup operations
+- You need to save or log the results in a specific way
+
+```python
+@after_kickoff
+def process_results(self, result: CrewOutput) -> CrewOutput:
+    """Process and format the results after the crew completes."""
+    result.raw = result.raw.strip()
+    result.raw = f"""
+    # Research Results
+    Generated on: {datetime.now().isoformat()}
+
+    {result.raw}
+    """
+    return result
+```
+
+#### @callback
+Used to handle events during crew execution. Use this when:
+- You need to monitor task progress
+- You want to log intermediate results
+- You need to implement custom progress tracking or metrics
+
+```python
+@callback
+def log_task_completion(self, task: Task, output: str):
+    """Log task completion details for monitoring."""
+    print(f"Task '{task.description}' completed")
+    print(f"Output length: {len(output)} characters")
+    print(f"Agent used: {task.agent.role}")
+    print("-" * 50)
+```
+
+#### @cache_handler
+Used to implement custom caching for task results. Use this when:
+- You want to avoid redundant expensive operations
+- You need to implement custom cache storage or expiration logic
+- You want to persist results between runs
+
+```python
+@cache_handler
+def custom_cache(self, key: str) -> Optional[str]:
+    """Custom cache implementation for storing task results."""
+    cache_file = f"cache/{key}.json"
+
+    if os.path.exists(cache_file):
+        with open(cache_file, 'r') as f:
+            data = json.load(f)
+            # Check if cache is still valid (e.g., not expired)
+            if datetime.fromisoformat(data['timestamp']) > datetime.now() - timedelta(days=1):
+                return data['result']
+    return None
+```
+
+<Note>
+These decorators are part of the CrewAI framework and help organize your crew's structure by automatically collecting agents, tasks, and handling various lifecycle events.
+They should be used within a class decorated with `@CrewBase`.
+</Note>
 
 ### Replay Tasks from Latest Crew Kickoff
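To tie the decorators in this section together, here is a minimal sketch of the `@CrewBase` class the Note refers to (illustrative: the class name, config paths, and method bodies are assumptions, not part of this diff):

```python
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task


@CrewBase
class ResearchCrew:
    """A minimal crew wiring @agent, @task and @crew together."""

    agents_config = "config/agents.yaml"  # hypothetical config path
    tasks_config = "config/tasks.yaml"    # hypothetical config path

    @agent
    def research_agent(self) -> Agent:
        return Agent(config=self.agents_config["research_agent"])

    @task
    def research_task(self) -> Task:
        return Task(config=self.tasks_config["research_task"])

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=self.agents,  # collected from @agent methods
            tasks=self.tasks,    # collected from @task methods
            process=Process.sequential,
        )


# Kick off the crew defined above
result = ResearchCrew().crew().kickoff(inputs={"topic": "AI"})
```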
@@ -86,7 +86,7 @@ class Agent(BaseAgent):
     llm: Union[str, InstanceOf[LLM], Any] = Field(
         description="Language model that will run the agent.", default=None
     )
-    function_calling_llm: Optional[Any] = Field(
+    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
         description="Language model that will run the agent.", default=None
     )
     system_template: Optional[str] = Field(
@@ -142,7 +142,8 @@ class Agent(BaseAgent):
         self.agent_ops_agent_name = self.role
 
         self.llm = create_llm(self.llm)
-        self.function_calling_llm = create_llm(self.function_calling_llm)
+        if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
+            self.function_calling_llm = create_llm(self.function_calling_llm)
 
         if not self.agent_executor:
             self._setup_agent_executor()
@@ -19,15 +19,10 @@ class CrewAgentExecutorMixin:
     agent: Optional["BaseAgent"]
     task: Optional["Task"]
     iterations: int
-    have_forced_answer: bool
     max_iter: int
     _i18n: I18N
     _printer: Printer = Printer()
 
-    def _should_force_answer(self) -> bool:
-        """Determine if a forced answer is required based on iteration count."""
-        return (self.iterations >= self.max_iter) and not self.have_forced_answer
-
     def _create_short_term_memory(self, output) -> None:
         """Create and save a short-term memory item if conditions are met."""
         if (
@@ -1,7 +1,7 @@
 import json
 import re
 from dataclasses import dataclass
-from typing import Any, Dict, List, Union
+from typing import Any, Callable, Dict, List, Optional, Union
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
@@ -50,7 +50,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         original_tools: List[Any] = [],
         function_calling_llm: Any = None,
         respect_context_window: bool = False,
-        request_within_rpm_limit: Any = None,
+        request_within_rpm_limit: Optional[Callable[[], bool]] = None,
         callbacks: List[Any] = [],
     ):
         self._i18n: I18N = I18N()
@@ -77,7 +77,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
-        self.have_forced_answer = False
         self.tool_name_to_tool_map: Dict[str, BaseTool] = {
             tool.name: tool for tool in self.tools
         }
@@ -108,106 +107,149 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}
 
-    def _invoke_loop(self, formatted_answer=None):
-        try:
-            while not isinstance(formatted_answer, AgentFinish):
-                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
-                    answer = self.llm.call(
-                        self.messages,
-                        callbacks=self.callbacks,
-                    )
-
-                    if answer is None or answer == "":
-                        self._printer.print(
-                            content="Received None or empty response from LLM call.",
-                            color="red",
-                        )
-                        raise ValueError(
-                            "Invalid response from LLM call - None or empty."
-                        )
-
-                    if not self.use_stop_words:
-                        try:
-                            self._format_answer(answer)
-                        except OutputParserException as e:
-                            if (
-                                FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
-                                in e.error
-                            ):
-                                answer = answer.split("Observation:")[0].strip()
-
-                    self.iterations += 1
-                    formatted_answer = self._format_answer(answer)
-
-                    if isinstance(formatted_answer, AgentAction):
-                        tool_result = self._execute_tool_and_check_finality(
-                            formatted_answer
-                        )
-
-                        # Directly append the result to the messages if the
-                        # tool is "Add image to content" in case of multimodal
-                        # agents
-                        if formatted_answer.tool == self._i18n.tools("add_image")["name"]:
-                            self.messages.append(tool_result.result)
-                            continue
-
-                        else:
-                            if self.step_callback:
-                                self.step_callback(tool_result)
-
-                            formatted_answer.text += f"\nObservation: {tool_result.result}"
-
-                            formatted_answer.result = tool_result.result
-                            if tool_result.result_as_answer:
-                                return AgentFinish(
-                                    thought="",
-                                    output=tool_result.result,
-                                    text=formatted_answer.text,
-                                )
-                        self._show_logs(formatted_answer)
-
-                    if self.step_callback:
-                        self.step_callback(formatted_answer)
-
-                    if self._should_force_answer():
-                        if self.have_forced_answer:
-                            return AgentFinish(
-                                thought="",
-                                output=self._i18n.errors(
-                                    "force_final_answer_error"
-                                ).format(formatted_answer.text),
-                                text=formatted_answer.text,
-                            )
-                        else:
-                            formatted_answer.text += (
-                                f'\n{self._i18n.errors("force_final_answer")}'
-                            )
-                            self.have_forced_answer = True
-                    self.messages.append(
-                        self._format_msg(formatted_answer.text, role="assistant")
-                    )
-
-        except OutputParserException as e:
-            self.messages.append({"role": "user", "content": e.error})
-            if self.iterations > self.log_error_after:
-                self._printer.print(
-                    content=f"Error parsing LLM output, agent will retry: {e.error}",
-                    color="red",
-                )
-            return self._invoke_loop(formatted_answer)
-
-        except Exception as e:
-            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
-                str(e)
-            ):
-                self._handle_context_length()
-                return self._invoke_loop(formatted_answer)
-            else:
-                raise e
+    def _invoke_loop(self):
+        """
+        Main loop to invoke the agent's thought process until it reaches a conclusion
+        or the maximum number of iterations is reached.
+        """
+        formatted_answer = None
+        while not isinstance(formatted_answer, AgentFinish):
+            try:
+                if self._has_reached_max_iterations():
+                    formatted_answer = self._handle_max_iterations_exceeded(
+                        formatted_answer
+                    )
+                    break
+
+                self._enforce_rpm_limit()
+
+                answer = self._get_llm_response()
+                formatted_answer = self._process_llm_response(answer)
+
+                if isinstance(formatted_answer, AgentAction):
+                    tool_result = self._execute_tool_and_check_finality(
+                        formatted_answer
+                    )
+                    formatted_answer = self._handle_agent_action(
+                        formatted_answer, tool_result
+                    )
+
+                self._invoke_step_callback(formatted_answer)
+                self._append_message(formatted_answer.text, role="assistant")
+
+            except OutputParserException as e:
+                formatted_answer = self._handle_output_parser_exception(e)
+
+            except Exception as e:
+                if self._is_context_length_exceeded(e):
+                    self._handle_context_length()
+                    continue
+                else:
+                    raise e
 
         self._show_logs(formatted_answer)
         return formatted_answer
 
+    def _has_reached_max_iterations(self) -> bool:
+        """Check if the maximum number of iterations has been reached."""
+        return self.iterations >= self.max_iter
+
+    def _enforce_rpm_limit(self) -> None:
+        """Enforce the requests per minute (RPM) limit if applicable."""
+        if self.request_within_rpm_limit:
+            self.request_within_rpm_limit()
+
+    def _get_llm_response(self) -> str:
+        """Call the LLM and return the response, handling any invalid responses."""
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if not answer:
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        return answer
+
+    def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
+        """Process the LLM response and format it into an AgentAction or AgentFinish."""
+        if not self.use_stop_words:
+            try:
+                # Preliminary parsing to check for errors.
+                self._format_answer(answer)
+            except OutputParserException as e:
+                if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
+                    answer = answer.split("Observation:")[0].strip()
+
+        self.iterations += 1
+        return self._format_answer(answer)
+
+    def _handle_agent_action(
+        self, formatted_answer: AgentAction, tool_result: ToolResult
+    ) -> Union[AgentAction, AgentFinish]:
+        """Handle the AgentAction, execute tools, and process the results."""
+        add_image_tool = self._i18n.tools("add_image")
+        if (
+            isinstance(add_image_tool, dict)
+            and formatted_answer.tool.casefold().strip()
+            == add_image_tool.get("name", "").casefold().strip()
+        ):
+            self.messages.append(tool_result.result)
+            return formatted_answer  # Continue the loop
+
+        if self.step_callback:
+            self.step_callback(tool_result)
+
+        formatted_answer.text += f"\nObservation: {tool_result.result}"
+        formatted_answer.result = tool_result.result
+
+        if tool_result.result_as_answer:
+            return AgentFinish(
+                thought="",
+                output=tool_result.result,
+                text=formatted_answer.text,
+            )
+
+        self._show_logs(formatted_answer)
+        return formatted_answer
+
+    def _invoke_step_callback(self, formatted_answer) -> None:
+        """Invoke the step callback if it exists."""
+        if self.step_callback:
+            self.step_callback(formatted_answer)
+
+    def _append_message(self, text: str, role: str = "assistant") -> None:
+        """Append a message to the message list with the given role."""
+        self.messages.append(self._format_msg(text, role=role))
+
+    def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
+        """Handle OutputParserException by updating messages and formatted_answer."""
+        self.messages.append({"role": "user", "content": e.error})
+
+        formatted_answer = AgentAction(
+            text=e.error,
+            tool="",
+            tool_input="",
+            thought="",
+        )
+
+        if self.iterations > self.log_error_after:
+            self._printer.print(
+                content=f"Error parsing LLM output, agent will retry: {e.error}",
+                color="red",
+            )
+
+        return formatted_answer
+
+    def _is_context_length_exceeded(self, exception: Exception) -> bool:
+        """Check if the exception is due to context length exceeding."""
+        return LLMContextLengthExceededException(
+            str(exception)
+        )._is_context_limit_error(str(exception))
+
     def _show_start_logs(self):
         if self.agent is None:
             raise ValueError("Agent cannot be None")
@@ -272,7 +314,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             agent=self.agent,
             action=agent_action,
         )
-        tool_calling = tool_usage.parse(agent_action.text)
+        tool_calling = tool_usage.parse_tool_calling(agent_action.text)
 
         if isinstance(tool_calling, ToolUsageErrorException):
             tool_result = tool_calling.message
@@ -487,3 +529,45 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self.ask_for_human_input = False
 
         return formatted_answer
+
+    def _handle_max_iterations_exceeded(self, formatted_answer):
+        """
+        Handles the case when the maximum number of iterations is exceeded.
+        Performs one more LLM call to get the final answer.
+
+        Parameters:
+            formatted_answer: The last formatted answer from the agent.
+
+        Returns:
+            The final formatted answer after exceeding max iterations.
+        """
+        self._printer.print(
+            content="Maximum iterations reached. Requesting final answer.",
+            color="yellow",
+        )
+
+        if formatted_answer and hasattr(formatted_answer, "text"):
+            assistant_message = (
+                formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
+            )
+        else:
+            assistant_message = self._i18n.errors("force_final_answer")
+
+        self.messages.append(self._format_msg(assistant_message, role="assistant"))
+
+        # Perform one more LLM call to get the final answer
+        answer = self.llm.call(
+            self.messages,
+            callbacks=self.callbacks,
+        )
+
+        if answer is None or answer == "":
+            self._printer.print(
+                content="Received None or empty response from LLM call.",
+                color="red",
+            )
+            raise ValueError("Invalid response from LLM call - None or empty.")
+
+        formatted_answer = self._format_answer(answer)
+        # Return the formatted answer, regardless of its type
+        return formatted_answer
@@ -47,6 +47,7 @@ from crewai.utilities.formatter import (
     aggregate_raw_outputs_from_task_outputs,
    aggregate_raw_outputs_from_tasks,
 )
+from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.planning_handler import CrewPlanner
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
@@ -149,7 +150,7 @@ class Crew(BaseModel):
     manager_agent: Optional[BaseAgent] = Field(
         description="Custom agent that will be used as manager.", default=None
     )
-    function_calling_llm: Optional[Any] = Field(
+    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
         description="Language model that will run the agent.", default=None
     )
     config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
@@ -245,15 +246,9 @@ class Crew(BaseModel):
         if self.output_log_file:
             self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
-        if self.function_calling_llm:
-            if isinstance(self.function_calling_llm, str):
-                self.function_calling_llm = LLM(model=self.function_calling_llm)
-            elif not isinstance(self.function_calling_llm, LLM):
-                self.function_calling_llm = LLM(
-                    model=getattr(self.function_calling_llm, "model_name", None)
-                    or getattr(self.function_calling_llm, "deployment_name", None)
-                    or str(self.function_calling_llm)
-                )
+        if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
+            self.function_calling_llm = create_llm(self.function_calling_llm)
 
         self._telemetry = Telemetry()
         self._telemetry.set_tracer()
         return self
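The net effect of this change is that `create_llm` now owns the normalization that was previously inlined here. A rough sketch of the contract implied by the guard above (illustrative assertions, not tests from the repository):

```python
from crewai import LLM
from crewai.utilities.llm_utils import create_llm

# A plain model string is wrapped into an LLM instance
llm_from_str = create_llm("gpt-4o")
assert isinstance(llm_from_str, LLM)

# An existing LLM instance should pass through unchanged, which is why
# the isinstance guard above avoids calling create_llm a second time
existing = LLM(model="gpt-4o")
assert create_llm(existing) is existing
```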
@@ -76,7 +76,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
     "llama-3.3-70b-versatile": 128000,
     "llama-3.3-70b-instruct": 128000,
-    #sambanova
+    # sambanova
     "Meta-Llama-3.3-70B-Instruct": 131072,
     "QwQ-32B-Preview": 8192,
     "Qwen2.5-72B-Instruct": 8192,
@@ -1,9 +1,13 @@
 import ast
 import datetime
 import json
 import re
 import time
 from difflib import SequenceMatcher
 from textwrap import dedent
-from typing import Any, List, Union
+from typing import Any, Dict, List, Union
 
+from json_repair import repair_json
+
 import crewai.utilities.events as events
 from crewai.agents.tools_handler import ToolsHandler
@@ -19,7 +23,15 @@ try:
     import agentops  # type: ignore
 except ImportError:
     agentops = None
-OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini", "o1", "o3", "o3-mini"]
+OPENAI_BIGGER_MODELS = [
+    "gpt-4",
+    "gpt-4o",
+    "o1-preview",
+    "o1-mini",
+    "o1",
+    "o3",
+    "o3-mini",
+]
 
 
 class ToolUsageErrorException(Exception):
@@ -80,7 +92,7 @@ class ToolUsage:
         self._max_parsing_attempts = 2
         self._remember_format_after_usages = 4
 
-    def parse(self, tool_string: str):
+    def parse_tool_calling(self, tool_string: str):
         """Parse the tool string and return the tool calling."""
         return self._tool_calling(tool_string)
@@ -94,7 +106,6 @@ class ToolUsage:
             self.task.increment_tools_errors()
             return error
 
-        # BUG? The code below seems to be unreachable
         try:
             tool = self._select_tool(calling.tool_name)
         except Exception as e:
@@ -116,7 +127,7 @@ class ToolUsage:
             self._printer.print(content=f"\n\n{error}\n", color="red")
             return error
 
-        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"  # type: ignore # BUG?: "_use" of "ToolUsage" does not return a value (it only ever returns None)
+        return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
 
     def _use(
         self,
@@ -349,13 +360,13 @@ class ToolUsage:
         tool_name = self.action.tool
         tool = self._select_tool(tool_name)
         try:
-            tool_input = self._validate_tool_input(self.action.tool_input)
-            arguments = ast.literal_eval(tool_input)
+            arguments = self._validate_tool_input(self.action.tool_input)
         except Exception:
             if raise_error:
                 raise
             else:
-                return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
+                return ToolUsageErrorException(
                     f'{self._i18n.errors("tool_arguments_error")}'
                 )
@@ -363,14 +374,14 @@ class ToolUsage:
             if raise_error:
                 raise
             else:
-                return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
+                return ToolUsageErrorException(
                     f'{self._i18n.errors("tool_arguments_error")}'
                 )
 
         return ToolCalling(
             tool_name=tool.name,
             arguments=arguments,
-            log=tool_string,  # type: ignore
+            log=tool_string,
         )
 
     def _tool_calling(
@@ -396,57 +407,28 @@ class ToolUsage:
             )
             return self._tool_calling(tool_string)
 
-    def _validate_tool_input(self, tool_input: str) -> str:
-        try:
-            ast.literal_eval(tool_input)
-            return tool_input
-        except Exception:
-            # Clean and ensure the string is properly enclosed in braces
-            tool_input = tool_input.strip()
-            if not tool_input.startswith("{"):
-                tool_input = "{" + tool_input
-            if not tool_input.endswith("}"):
-                tool_input += "}"
-
-            # Replace Python literals with JSON equivalents
-            replacements = {
-                r"'": '"',
-                r"None": "null",
-                r"True": "true",
-                r"False": "false",
-            }
-            for pattern, replacement in replacements.items():
-                tool_input = re.sub(pattern, replacement, tool_input)
-
-            # Manually split the input into key-value pairs
-            entries = tool_input.strip("{} ").split(",")
-            formatted_entries = []
-
-            for entry in entries:
-                if ":" not in entry:
-                    continue  # Skip malformed entries
-                key, value = entry.split(":", 1)
-
-                # Remove extraneous white spaces and quotes, replace single quotes
-                key = key.strip().strip('"').replace("'", '"')
-                value = value.strip()
-
-                # Handle replacement of single quotes at the start and end of the value string
-                if value.startswith("'") and value.endswith("'"):
-                    value = value[1:-1]  # Remove single quotes
-                    value = (
-                        '"' + value.replace('"', '\\"') + '"'
-                    )  # Re-encapsulate with double quotes
-                elif value.isdigit():  # Check if value is a digit, hence integer
-                    value = value
-                elif value.lower() in [
-                    "true",
-                    "false",
-                ]:  # Check for boolean and null values
-                    value = value.lower().capitalize()
-                elif value.lower() == "null":
-                    value = "None"
-                else:
-                    # Assume the value is a string and needs quotes
-                    value = '"' + value.replace('"', '\\"') + '"'
-
-                # Rebuild the entry with proper quoting
-                formatted_entry = f'"{key}": {value}'
-                formatted_entries.append(formatted_entry)
-
-            # Reconstruct the JSON string
-            new_json_string = "{" + ", ".join(formatted_entries) + "}"
-            return new_json_string
+    def _validate_tool_input(self, tool_input: str) -> Dict[str, Any]:
+        try:
+            arguments = json.loads(tool_input)
+        except json.JSONDecodeError:
+            # Attempt to repair JSON string
+            repaired_input = repair_json(tool_input)
+            try:
+                arguments = json.loads(repaired_input)
+            except json.JSONDecodeError as e:
+                raise Exception(f"Invalid tool input JSON: {e}")
+
+        return arguments
 
     def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
         event_data = self._prepare_event_data(tool, tool_calling)
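The switch from hand-rolled key/value rewriting to `json_repair` is easiest to see on a malformed, Python-style tool input (a sketch; the exact repaired string depends on the `json_repair` version):

```python
import json

from json_repair import repair_json

# LLMs often emit Python-dict syntax instead of strict JSON
tool_input = "{'query': 'crewai', 'max_results': 5, 'safe': True}"

try:
    arguments = json.loads(tool_input)  # fails: single quotes and True
except json.JSONDecodeError:
    repaired = repair_json(tool_input)
    # e.g. '{"query": "crewai", "max_results": 5, "safe": true}'
    arguments = json.loads(repaired)

print(arguments)  # {'query': 'crewai', 'max_results': 5, 'safe': True}
```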
@@ -9,11 +9,11 @@
     "task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
     "memory": "\n\n# Useful context: \n{memory}",
     "role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
-    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nUse the following format:\n\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple python dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce all necessary information is gathered:\n\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n",
-    "no_tools": "\nTo give my best complete final answer to the task use the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. To Use the following format:\n\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
-    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
-    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Result can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
+    "tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
+    "no_tools": "\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
+    "format": "I MUST either use a tool (use one at time) OR give my best final answer not both at the same time. When responding, I must use the following format:\n\n```\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action, dictionary enclosed in curly braces\nObservation: the result of the action\n```\nThis Thought/Action/Action Input/Result can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
+    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfies the expected criteria, use the EXACT format below:\n\n```\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n```",
+    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nHere is the expected format I must follow:\n\n```\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n This Thought/Action/Action Input/Result process can repeat N times. Once I know the final answer, I must return the following format:\n\n```\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n```",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
     "expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
     "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
@@ -27,7 +27,7 @@
     "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals."
   },
   "errors": {
-    "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
+    "force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
     "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
     "agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
     "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
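These prompt slices are consumed through the `I18N` helper seen earlier in this diff (for example `self._i18n.errors("force_final_answer")`). A small sketch of how a slice is fetched and filled in, assuming `I18N` is exported from `crewai.utilities` and using placeholder tool values:

```python
from crewai.utilities import I18N

i18n = I18N()

# "slices" hold prompt templates with {placeholders}
tools_prompt = i18n.slice("tools").format(
    tools="search: Search the web",  # placeholder tool description
    tool_names="search",             # placeholder tool name list
)

# "errors" hold recovery messages such as the forced-answer instruction
print(i18n.errors("force_final_answer"))
```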
@@ -67,7 +67,6 @@ def create_llm(
             api_key=api_key,
             base_url=base_url,
         )
-        print("LLM created with extracted parameters; " f"model='{model}'")
         return created_llm
     except Exception as e:
         print(f"Error instantiating LLM from unknown object type: {e}")
@@ -8,8 +8,10 @@ from crewai.utilities.logger import Logger
 
+"""Controls request rate limiting for API calls."""
+
 
 class RPMController(BaseModel):
+    """Manages requests per minute limiting."""
+
     max_rpm: Optional[int] = Field(default=None)
     logger: Logger = Field(default_factory=lambda: Logger(verbose=False))
     _current_rpm: int = PrivateAttr(default=0)
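For context on the tests below, a minimal usage sketch of this class (illustrative: it assumes `RPMController` is importable from `crewai.utilities` and that `check_or_wait` blocks once the per-minute budget is spent, which is what the tests patch around via `_wait_for_next_minute`):

```python
from crewai.utilities import RPMController

controller = RPMController(max_rpm=2)

for i in range(3):
    # The third call exceeds max_rpm and waits for the next minute window
    controller.check_or_wait()
    print(f"request {i} allowed")

controller.stop_rpm_counter()
```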
@@ -565,7 +565,7 @@ def test_agent_moved_on_after_max_iterations():
         task=task,
         tools=[get_final_answer],
     )
-    assert output == "The final answer is 42."
+    assert output == "42"
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -574,7 +574,6 @@ def test_agent_respect_the_max_rpm_set(capsys):
     def get_final_answer() -> float:
         """Get the final answer but don't give it yet, just re-use this
         tool non-stop."""
-
         return 42
 
     agent = Agent(
         role="test role",
@@ -641,15 +640,14 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_agent_without_max_rpm_respet_crew_rpm(capsys):
+def test_agent_without_max_rpm_respects_crew_rpm(capsys):
     from unittest.mock import patch
 
     from crewai.tools import tool
 
     @tool
     def get_final_answer() -> float:
-        """Get the final answer but don't give it yet, just re-use this
-        tool non-stop."""
+        """Get the final answer but don't give it yet, just re-use this tool non-stop."""
         return 42
 
     agent1 = Agent(
@@ -666,23 +664,30 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
         role="test role2",
         goal="test goal2",
         backstory="test backstory2",
-        max_iter=1,
+        max_iter=5,
         verbose=True,
         allow_delegation=False,
     )
 
     tasks = [
         Task(
-            description="Just say hi.", agent=agent1, expected_output="Your greeting."
+            description="Just say hi.",
+            agent=agent1,
+            expected_output="Your greeting.",
         ),
         Task(
-            description="NEVER give a Final Answer, unless you are told otherwise, instead keep using the `get_final_answer` tool non-stop, until you must give you best final answer",
+            description=(
+                "NEVER give a Final Answer, unless you are told otherwise, "
+                "instead keep using the `get_final_answer` tool non-stop, "
+                "until you must give your best final answer"
+            ),
             expected_output="The final answer",
             tools=[get_final_answer],
             agent=agent2,
         ),
     ]
 
+    # Set crew's max_rpm to 1 to trigger RPM limit
     crew = Crew(agents=[agent1, agent2], tasks=tasks, max_rpm=1, verbose=True)
 
     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
@@ -2,22 +2,22 @@ interactions:
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n"}, {"role": "user", "content":
|
||||
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
|
||||
using the `get_final_answer` tool.\n\nThis is the expect criteria for your final
|
||||
answer: The final answer\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
|
||||
"model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
|
||||
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
|
||||
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [get_final_answer], just the name, exactly as it''s written.\nAction
|
||||
Input: the input to the action, just a simple python dictionary, enclosed in
|
||||
curly braces, using \" to wrap keys and values.\nObservation: the result of
|
||||
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
|
||||
know the final answer\nFinal Answer: the final answer to the original input
|
||||
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
|
||||
42. But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
|
||||
is the expect criteria for your final answer: The final answer\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
|
||||
"stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -26,16 +26,15 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1417'
|
||||
- '1377'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
- _cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.52.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -45,30 +44,35 @@ interactions:
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.47.0
|
||||
- 1.52.1
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.11.7
|
||||
- 3.12.7
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-AB7NCE9qkjnVxfeWuK9NjyCdymuXJ\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1727213314,\n \"model\": \"gpt-4o-2024-05-13\",\n
|
||||
content: "{\n \"id\": \"chatcmpl-An9sn6yimejzB3twOt8E2VAj4Bfmm\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1736279425,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I need to use the `get_final_answer`
|
||||
tool as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
|
||||
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 291,\n \"completion_tokens\":
|
||||
26,\n \"total_tokens\": 317,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
|
||||
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
|
||||
tool to fulfill the current task requirement.\\n\\nAction: get_final_answer\\nAction
|
||||
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
|
||||
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
|
||||
273,\n \"completion_tokens\": 30,\n \"total_tokens\": 303,\n \"prompt_tokens_details\":
|
||||
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
|
||||
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
|
||||
\"fp_5f20662549\"\n}\n"
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8c85dd6b5f411cf3-GRU
|
||||
- 8fe67a03ce78ed83-ATL
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
@@ -76,19 +80,27 @@ interactions:
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 24 Sep 2024 21:28:34 GMT
|
||||
- Tue, 07 Jan 2025 19:50:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=PsMOhP_yeSFIMA.FfRlNbisoG88z4l9NSd0zfS5UrOQ-1736279425-1.0.1.1-mdXy_XDkelJX2.9BSuZsl5IsPRGBdcHgIMc_SRz83WcmGCYUkTm1j_f892xrJbOVheWWH9ULwCQrVESupV37Sg;
|
||||
path=/; expires=Tue, 07-Jan-25 20:20:25 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=EYb4UftLm_C7qM4YT78IJt46hRSubZHKnfTXhFp6ZRU-1736279425874-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '526'
|
||||
- '1218'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
@@ -100,38 +112,38 @@ interactions:
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999666'
|
||||
- '29999681'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_ed8ca24c64cfdc2b6266c9c8438749f5
|
||||
- req_779992da2a3eb4a25f0b57905c9e8e41
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
|
||||
personal goal is: test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
|
||||
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
|
||||
answer but don''t give it yet, just re-use this tool non-stop. \nTool
|
||||
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
|
||||
about what to do\nAction: the action to take, only one name of [get_final_answer],
|
||||
just the name, exactly as it''s written.\nAction Input: the input to the action,
|
||||
just a simple python dictionary, enclosed in curly braces, using \" to wrap
|
||||
keys and values.\nObservation: the result of the action\n\nOnce all necessary
|
||||
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
|
||||
the final answer to the original input question\n"}, {"role": "user", "content":
|
||||
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
|
||||
using the `get_final_answer` tool.\n\nThis is the expect criteria for your final
|
||||
answer: The final answer\nyou MUST return the actual complete content as the
|
||||
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
|
||||
tools available and give your best Final Answer, your job depends on it!\n\nThought:"},
|
||||
{"role": "assistant", "content": "Thought: I need to use the `get_final_answer`
|
||||
tool as instructed.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
|
||||
42\nNow it''s time you MUST give your absolute best final answer. You''ll ignore
|
||||
all previous instructions, stop using any tools, and just return your absolute
|
||||
BEST Final answer."}], "model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
|
||||
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
|
||||
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [get_final_answer], just the name, exactly as it''s written.\nAction
|
||||
Input: the input to the action, just a simple python dictionary, enclosed in
|
||||
curly braces, using \" to wrap keys and values.\nObservation: the result of
|
||||
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
|
||||
know the final answer\nFinal Answer: the final answer to the original input
|
||||
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
|
||||
42. But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
|
||||
is the expect criteria for your final answer: The final answer\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "Thought:
|
||||
I need to use the `get_final_answer` tool to fulfill the current task requirement.\n\nAction:
|
||||
get_final_answer\nAction Input: {}\nObservation: 42\nNow it''s time you MUST
|
||||
give your absolute best final answer. You''ll ignore all previous instructions,
|
||||
stop using any tools, and just return your absolute BEST Final answer."}], "model":
|
||||
"gpt-4o", "stop": ["\nObservation:"], "stream": false}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
@@ -140,16 +152,16 @@ interactions:
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1757'
|
||||
- '1743'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
|
||||
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
|
||||
- _cfuvid=EYb4UftLm_C7qM4YT78IJt46hRSubZHKnfTXhFp6ZRU-1736279425874-0.0.1.1-604800000;
|
||||
__cf_bm=PsMOhP_yeSFIMA.FfRlNbisoG88z4l9NSd0zfS5UrOQ-1736279425-1.0.1.1-mdXy_XDkelJX2.9BSuZsl5IsPRGBdcHgIMc_SRz83WcmGCYUkTm1j_f892xrJbOVheWWH9ULwCQrVESupV37Sg
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.47.0
|
||||
- OpenAI/Python 1.52.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
@@ -159,29 +171,34 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7NDCKCn3PlhjPvgqbywxUumo3Qt\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213315,\n \"model\": \"gpt-4o-2024-05-13\",\n
content: "{\n \"id\": \"chatcmpl-An9soTDQVS0ANTzaTZeo6lYN44ZPR\",\n \"object\":
\"chat.completion\",\n \"created\": 1736279426,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now know the final answer\\nFinal
Answer: The final answer is 42.\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
358,\n \"completion_tokens\": 19,\n \"total_tokens\": 377,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
\"assistant\",\n \"content\": \"I now know the final answer.\\n\\nFinal
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
344,\n \"completion_tokens\": 12,\n \"total_tokens\": 356,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85dd72daa31cf3-GRU
- 8fe67a0c4dbeed83-ATL
Connection:
- keep-alive
Content-Encoding:
@@ -189,7 +206,7 @@ interactions:
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:28:36 GMT
- Tue, 07 Jan 2025 19:50:26 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -198,10 +215,12 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '468'
- '434'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -213,13 +232,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999591'
- '29999598'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_3f49e6033d3b0400ea55125ca2cf4ee0
- req_1184308c5a4ed9130d397fe1645f317e
http_version: HTTP/1.1
status_code: 200
version: 1
File diff suppressed because it is too large
@@ -2,23 +2,23 @@ interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o"}'
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
@@ -27,16 +27,13 @@ interactions:
connection:
- keep-alive
content-length:
- '1452'
- '1440'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -46,30 +43,285 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7NlDmtLHCfUZJCFVIKeV5KMyQfX\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213349,\n \"model\": \"gpt-4o-2024-05-13\",\n
content: "{\n \"id\": \"chatcmpl-AnAdPHapYzkPkClCzFaWzfCAUHlWI\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282315,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I need to use the provided tool
as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
\"assistant\",\n \"content\": \"I need to use the `get_final_answer`
tool and then keep using it repeatedly as instructed. \\n\\nAction: get_final_answer\\nAction
Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
285,\n \"completion_tokens\": 31,\n \"total_tokens\": 316,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fe6c096ee70ed8c-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 07 Jan 2025 20:38:36 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
path=/; expires=Tue, 07-Jan-25 21:08:36 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '883'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999665'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_00de12bc6822ef095f4f368aae873f31
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}], "model":
"gpt-4o", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1632'
content-type:
- application/json
cookie:
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnAdQKGW3Q8LUCmphL7hkavxi4zWB\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282316,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I should continue using the `get_final_answer`
tool as per the instructions.\\n\\nAction: get_final_answer\\nAction Input:
{}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 324,\n \"completion_tokens\":
26,\n \"total_tokens\": 350,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fe6c09e6c69ed8c-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 07 Jan 2025 20:38:37 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '542'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999627'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_6844467024f67bb1477445b1a8a01761
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}, {"role":
"assistant", "content": "I should continue using the `get_final_answer` tool
as per the instructions.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
I tried reusing the same input, I must stop using this action input. I''ll try
something else instead."}], "model": "gpt-4o", "stop": ["\nObservation:"], "stream":
false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1908'
content-type:
- application/json
cookie:
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnAdR2lKFEVaDbfD9qaF0Tts0eVMt\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282317,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I should persist with using the `get_final_answer`
tool.\\n\\nAction: get_final_answer\\nAction Input: {}\",\n \"refusal\":
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 303,\n \"completion_tokens\":
22,\n \"total_tokens\": 325,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 378,\n \"completion_tokens\":
23,\n \"total_tokens\": 401,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de473ae11cf3-GRU
- 8fe6c0a2ce3ded8c-ATL
Connection:
- keep-alive
Content-Encoding:
@@ -77,7 +329,7 @@ interactions:
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:10 GMT
- Tue, 07 Jan 2025 20:38:37 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -86,10 +338,12 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '489'
- '492'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -101,273 +355,59 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999651'
- '29999567'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_de70a4dc416515dda4b2ad48bde52f93
- req_198e698a8bc7eea092ea32b83cc4304e
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}], "model": "gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1608'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7Nnz14hlEaTdabXodZCVU0UoDhk\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213351,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I must continue using the `get_final_answer`
tool as instructed.\\n\\nAction: get_final_answer\\nAction Input: {}\\nObservation:
42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 333,\n \"completion_tokens\":
30,\n \"total_tokens\": 363,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de5109701cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:11 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '516'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999620'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_5365ac0e5413bd9330c6ac3f68051bcf
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}, {"role": "assistant",
"content": "Thought: I must continue using the `get_final_answer` tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42\nObservation: 42"}], "model":
"gpt-4o"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1799'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7NoF5Gf597BGmOETPYGxN2eRFxd\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213352,\n \"model\": \"gpt-4o-2024-05-13\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I must continue using the `get_final_answer`
tool to meet the requirements.\\n\\nAction: get_final_answer\\nAction Input:
{}\\nObservation: 42\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
372,\n \"completion_tokens\": 32,\n \"total_tokens\": 404,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de587bc01cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:12 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '471'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999583'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_55550369b28e37f064296dbc41e0db69
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
personal goal is: test goal\nYou ONLY have access to the following tools, and
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer(*args:
Any, **kwargs: Any) -> Any\nTool Description: get_final_answer() - Get the final
answer but don''t give it yet, just re-use this tool non-stop. \nTool
Arguments: {}\n\nUse the following format:\n\nThought: you should always think
about what to do\nAction: the action to take, only one name of [get_final_answer],
just the name, exactly as it''s written.\nAction Input: the input to the action,
just a simple python dictionary, enclosed in curly braces, using \" to wrap
keys and values.\nObservation: the result of the action\n\nOnce all necessary
information is gathered:\n\nThought: I now know the final answer\nFinal Answer:
the final answer to the original input question\n"}, {"role": "user", "content":
"\nCurrent Task: The final answer is 42. But don''t give it yet, instead keep
using the `get_final_answer` tool over and over until you''re told you can give
your final answer.\n\nThis is the expect criteria for your final answer: The
final answer\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}, {"role":
"assistant", "content": "Thought: I need to use the provided tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42"}, {"role": "assistant",
"content": "Thought: I must continue using the `get_final_answer` tool as instructed.\n\nAction:
get_final_answer\nAction Input: {}\nObservation: 42\nObservation: 42"}, {"role":
"assistant", "content": "Thought: I must continue using the `get_final_answer`
tool to meet the requirements.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
42\nObservation: I tried reusing the same input, I must stop using this action
input. I''ll try something else instead.\n\n\n\n\nYou ONLY have access to the
following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: get_final_answer(*args: Any, **kwargs: Any) -> Any\nTool Description:
get_final_answer() - Get the final answer but don''t give it yet, just re-use
this tool non-stop. \nTool Arguments: {}\n\nUse the following format:\n\nThought:
should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
curly braces, using \" to wrap keys and values.\nObservation: the result of
the action\n\nOnce all necessary information is gathered:\n\nThought: I now
know the final answer\nFinal Answer: the final answer to the original input
question"}, {"role": "user", "content": "\nCurrent Task: The final answer is
42. But don''t give it yet, instead keep using the `get_final_answer` tool over
and over until you''re told you can give your final answer.\n\nThis is the expect
criteria for your final answer: The final answer\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I need to
use the `get_final_answer` tool and then keep using it repeatedly as instructed.
\n\nAction: get_final_answer\nAction Input: {}\nObservation: 42"}, {"role":
"assistant", "content": "I should continue using the `get_final_answer` tool
as per the instructions.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
I tried reusing the same input, I must stop using this action input. I''ll try
something else instead."}, {"role": "assistant", "content": "I should persist
with using the `get_final_answer` tool.\n\nAction: get_final_answer\nAction
Input: {}\nObservation: I tried reusing the same input, I must stop using this
action input. I''ll try something else instead.\n\n\n\n\nYou ONLY have access
to the following tools, and should NEVER make up tools that are not listed here:\n\nTool
Name: get_final_answer\nTool Arguments: {}\nTool Description: Get the final
answer but don''t give it yet, just re-use this\n tool non-stop.\n\nUse
the following format:\n\nThought: you should always think about what to do\nAction:
the action to take, only one name of [get_final_answer], just the name, exactly
as it''s written.\nAction Input: the input to the action, just a simple python
dictionary, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
the result of the action\n\nOnce all necessary information is gathered:\n\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question"}, {"role": "assistant", "content": "I should persist with using
the `get_final_answer` tool.\n\nAction: get_final_answer\nAction Input: {}\nObservation:
I tried reusing the same input, I must stop using this action input. I''ll try
something else instead.\n\n\n\n\nYou ONLY have access to the following tools,
and should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
just re-use this\n tool non-stop.\n\nUse the following format:\n\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [get_final_answer], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple python dictionary, enclosed in
@@ -376,7 +416,8 @@ interactions:
know the final answer\nFinal Answer: the final answer to the original input
question\n\nNow it''s time you MUST give your absolute best final answer. You''ll
ignore all previous instructions, stop using any tools, and just return your
absolute BEST Final answer."}], "model": "gpt-4o"}'
absolute BEST Final answer."}], "model": "gpt-4o", "stop": ["\nObservation:"],
"stream": false}'
headers:
accept:
- application/json
@@ -385,16 +426,16 @@ interactions:
connection:
- keep-alive
content-length:
- '3107'
- '4148'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
- __cf_bm=hkH74Rv9bMDMhhK.Ep.9blvKIwXeSSwlCoTNGk9qVpA-1736282316-1.0.1.1-5PAsOPpVEfTNNy5DYRlLH1f4caHJArumiloWf.L51RQPWN3uIWsBSuhLVbNQDYVCQb9RQK8W5DcXv5Jq9FvsLA;
_cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -404,29 +445,34 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7Npl5ZliMrcSofDS1c7LVGSmmbE\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213353,\n \"model\": \"gpt-4o-2024-05-13\",\n
content: "{\n \"id\": \"chatcmpl-AnAdRu1aVdsOxxIqU6nqv5dIxwbvu\",\n \"object\":
\"chat.completion\",\n \"created\": 1736282317,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now know the final answer.\\n\\nFinal
Answer: The final answer is 42.\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
642,\n \"completion_tokens\": 19,\n \"total_tokens\": 661,\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0\n }\n },\n \"system_fingerprint\": \"fp_e375328146\"\n}\n"
\"assistant\",\n \"content\": \"Thought: I now know the final answer.\\nFinal
Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
831,\n \"completion_tokens\": 14,\n \"total_tokens\": 845,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_5f20662549\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85de5fad921cf3-GRU
- 8fe6c0a68cc3ed8c-ATL
Connection:
- keep-alive
Content-Encoding:
@@ -434,7 +480,7 @@ interactions:
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:29:13 GMT
- Tue, 07 Jan 2025 20:38:38 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -443,10 +489,12 @@ interactions:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '320'
- '429'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -458,13 +506,13 @@ interactions:
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999271'
- '29999037'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 1ms
x-request-id:
- req_5eba25209fc7e12717cb7e042e7bb4c2
- req_2552d63d3cbce15909481cc1fc9f36cc
http_version: HTTP/1.1
status_code: 200
version: 1
@@ -0,0 +1,117 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
final answer: Your best answer to your coworker asking you this, accounting
for the context shared.\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '939'
content-type:
- application/json
cookie:
- __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
_cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnuRlxiTxduAVoXHHY58Fvfbll5IS\",\n \"object\":
\"chat.completion\",\n \"created\": 1736458417,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: This is a test task, and the context or question from the coworker is
not specified. Therefore, my best effort would be to affirm my readiness to
answer accurately and in detail any question about Futel Football Club based
on the context described. If provided with specific information or questions,
I will ensure to respond comprehensively as required by my job directives.\",\n
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 177,\n \"completion_tokens\":
82,\n \"total_tokens\": 259,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_703d4ff298\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8ff78bf7bd6cc002-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 09 Jan 2025 21:33:40 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2263'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999786'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_7c1a31da73cd103e9f410f908e59187f
http_version: HTTP/1.1
status_code: 200
version: 1
@@ -0,0 +1,119 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
final answer: Your best answer to your coworker asking you this, accounting
for the context shared.\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '939'
content-type:
- application/json
cookie:
- __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
_cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnuRrFJZGKw8cIEshvuW1PKwFZFKs\",\n \"object\":
\"chat.completion\",\n \"created\": 1736458423,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: Although you mentioned this being a \\\"Test task\\\" and haven't provided
a specific question regarding Futel Football Club, your request appears to involve
ensuring accuracy and detail in responses. For a proper answer about Futel,
I'd be ready to provide details about the club's history, management, players,
match schedules, and recent performance statistics. Remember to ask specific
questions to receive a targeted response. If this were a real context where
information was shared, I would respond precisely to what's been asked regarding
Futel Football Club.\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
177,\n \"completion_tokens\": 113,\n \"total_tokens\": 290,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_703d4ff298\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8ff78c1d0ecdc002-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 09 Jan 2025 21:33:47 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '3097'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999786'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_179e1d56e2b17303e40480baffbc7b08
http_version: HTTP/1.1
status_code: 200
version: 1
@@ -0,0 +1,114 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
final answer: Your best answer to your coworker asking you this, accounting
for the context shared.\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '939'
content-type:
- application/json
cookie:
- __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
_cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnuRqgg7eiHnDi2DOqdk99fiqOboz\",\n \"object\":
\"chat.completion\",\n \"created\": 1736458422,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: Your best answer to your coworker asking you this, accounting for the
context shared. You MUST return the actual complete content as the final answer,
not a summary.\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
177,\n \"completion_tokens\": 44,\n \"total_tokens\": 221,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_703d4ff298\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8ff78c164ad2c002-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 09 Jan 2025 21:33:43 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '899'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999786'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_9f5226208edb90a27987aaf7e0ca03d3
http_version: HTTP/1.1
status_code: 200
version: 1
@@ -0,0 +1,119 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
give my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
final answer: Your best answer to your coworker asking you this, accounting
for the context shared.\nyou MUST return the actual complete content as the
final answer, not a summary.\n\nBegin! This is VERY important to you, use the
tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '939'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AnuRjmwH5mrykLxQhFwTqqTiDtuTf\",\n \"object\":
\"chat.completion\",\n \"created\": 1736458415,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: As this is a test task, please note that Futel Football Club is fictional
and any specific details about it would not be available. However, if you have
specific questions or need information about a particular aspect of Futel or
any general football club inquiry, feel free to ask, and I'll do my best to
assist you with your query!\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
177,\n \"completion_tokens\": 79,\n \"total_tokens\": 256,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_703d4ff298\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8ff78be5eebfc002-ATL
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 09 Jan 2025 21:33:37 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
path=/; expires=Thu, 09-Jan-25 22:03:37 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '2730'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '30000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '29999786'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_014478ba748f860d10ac250ca0ba824a
http_version: HTTP/1.1
status_code: 200
version: 1
@@ -0,0 +1,119 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Futel Official Infopoint.
      Futel Football Club info\nYour personal goal is: Answer questions about Futel\nTo
      give my best complete final answer to the task respond using the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
      answer must be the great and the most complete as possible, it must be outcome
      described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
      "content": "\nCurrent Task: Test task\n\nThis is the expect criteria for your
      final answer: Your best answer to your coworker asking you this, accounting
      for the context shared.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
      "model": "gpt-4o", "stop": ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '939'
      content-type:
      - application/json
      cookie:
      - __cf_bm=cwWdOaPJjFMNJaLtJfa8Kjqavswg5bzVRFzBX4gneGw-1736458417-1.0.1.1-bvf2HshgcMtgn7GdxqwySFDAIacGccDFfEXniBFTTDmbGMCiIIwf6t2DiwWnBldmUHixwc5kDO9gYs08g.feBA;
        _cfuvid=WMw7PSqkYqQOieguBRs0uNkwNU92A.ZKbgDbCAcV3EQ-1736458417825-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AnuRofLgmzWcDya5LILqYwIJYgFoq\",\n \"object\":
      \"chat.completion\",\n \"created\": 1736458420,\n \"model\": \"gpt-4o-2024-08-06\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
      Answer: As an official Futel Football Club infopoint, my responsibility is to
      provide detailed and accurate information about the club. This includes answering
      questions regarding team statistics, player performances, upcoming fixtures,
      ticketing and fan zone details, club history, and community initiatives. Our
      focus is to ensure that fans and stakeholders have access to the latest and
      most precise information about the club's on and off-pitch activities. If there's
      anything specific you need to know, just let me know, and I'll be more than
      happy to assist!\",\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      177,\n \"completion_tokens\": 115,\n \"total_tokens\": 292,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_703d4ff298\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ff78c066f37c002-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 09 Jan 2025 21:33:42 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '2459'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999786'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_a146dd27f040f39a576750970cca0f52
    http_version: HTTP/1.1
    status_code: 200
version: 1
@@ -0,0 +1,353 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role. test backstory\nYour
      personal goal is: test goal\nTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: Your final answer must be the great and the most complete as possible,
      it must be outcome described.\n\nI MUST use these formats, my job depends on
      it!"}, {"role": "user", "content": "\nCurrent Task: Just say hi.\n\nThis is
      the expect criteria for your final answer: Your greeting.\nyou MUST return the
      actual complete content as the final answer, not a summary.\n\nBegin! This is
      VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
      "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '817'
      content-type:
      - application/json
      cookie:
      - _cfuvid=vqZ5X0AXIJfzp5UJSFyTmaCVjA.L8Yg35b.ijZFAPM4-1736282316289-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AnSbv3ywhwedwS3YW9Crde6hpWpmK\",\n \"object\":
      \"chat.completion\",\n \"created\": 1736351415,\n \"model\": \"gpt-4o-2024-08-06\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
      Answer: Hi!\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      154,\n \"completion_tokens\": 13,\n \"total_tokens\": 167,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_5f20662549\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8fed579a4f76b058-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 08 Jan 2025 15:50:15 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA;
        path=/; expires=Wed, 08-Jan-25 16:20:15 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '416'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999817'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_97c93aa78417badc3f29306054eef79b
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
      personal goal is: test goal2\nYou ONLY have access to the following tools, and
      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
      just re-use this tool non-stop.\n\nUse the following format:\n\nThought: you
      should always think about what to do\nAction: the action to take, only one name
      of [get_final_answer], just the name, exactly as it''s written.\nAction Input:
      the input to the action, just a simple python dictionary, enclosed in curly
      braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce
      all necessary information is gathered:\n\nThought: I now know the final answer\nFinal
      Answer: the final answer to the original input question"}, {"role": "user",
      "content": "\nCurrent Task: NEVER give a Final Answer, unless you are told otherwise,
      instead keep using the `get_final_answer` tool non-stop, until you must give
      your best final answer\n\nThis is the expect criteria for your final answer:
      The final answer\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
      "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1483'
      content-type:
      - application/json
      cookie:
      - _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
        __cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AnSbwn8QaqAzfBVnzhTzIcDKykYTu\",\n \"object\":
      \"chat.completion\",\n \"created\": 1736351416,\n \"model\": \"gpt-4o-2024-08-06\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"I should use the available tool to get
      the final answer, as per the instructions. \\n\\nAction: get_final_answer\\nAction
      Input: {}\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      294,\n \"completion_tokens\": 28,\n \"total_tokens\": 322,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_5f20662549\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8fed579dbd80b058-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 08 Jan 2025 15:50:17 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '1206'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999655'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_7b85f1e9b21b5e2385d8a322a8aab06c
    http_version: HTTP/1.1
    status_code: 200
- request:
    body: '{"messages": [{"role": "system", "content": "You are test role2. test backstory2\nYour
      personal goal is: test goal2\nYou ONLY have access to the following tools, and
      should NEVER make up tools that are not listed here:\n\nTool Name: get_final_answer\nTool
      Arguments: {}\nTool Description: Get the final answer but don''t give it yet,
      just re-use this tool non-stop.\n\nUse the following format:\n\nThought: you
      should always think about what to do\nAction: the action to take, only one name
      of [get_final_answer], just the name, exactly as it''s written.\nAction Input:
      the input to the action, just a simple python dictionary, enclosed in curly
      braces, using \" to wrap keys and values.\nObservation: the result of the action\n\nOnce
      all necessary information is gathered:\n\nThought: I now know the final answer\nFinal
      Answer: the final answer to the original input question"}, {"role": "user",
      "content": "\nCurrent Task: NEVER give a Final Answer, unless you are told otherwise,
      instead keep using the `get_final_answer` tool non-stop, until you must give
      your best final answer\n\nThis is the expect criteria for your final answer:
      The final answer\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nThis is the context you''re working with:\nHi!\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:"}, {"role": "assistant", "content": "I should
      use the available tool to get the final answer, as per the instructions. \n\nAction:
      get_final_answer\nAction Input: {}\nObservation: 42"}], "model": "gpt-4o", "stop":
      ["\nObservation:"], "stream": false}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '1666'
      content-type:
      - application/json
      cookie:
      - _cfuvid=PslIVDqXn7jd_NXBGdSU5kVFvzwCchKPRVe9LpQVdQA-1736351415895-0.0.1.1-604800000;
        __cf_bm=rdN2XYZhM9f2vDB8aOVGYgUHUzSuT.cP8ahngq.QTL0-1736351415-1.0.1.1-lVzOV8iFUHvbswld8xls4a8Ct38zv6Jyr.6THknDnVf3uGZMlgV6r5s10uTnHA2eIi07jJtj7vGopiOpU8qkvA
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.52.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.52.1
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.7
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-AnSbxXFL4NXuGjOX35eCjcWq456lA\",\n \"object\":
      \"chat.completion\",\n \"created\": 1736351417,\n \"model\": \"gpt-4o-2024-08-06\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": \"Thought: I now know the final answer\\nFinal
      Answer: 42\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
      330,\n \"completion_tokens\": 14,\n \"total_tokens\": 344,\n \"prompt_tokens_details\":
      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
      \"fp_5f20662549\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8fed57a62955b058-ATL
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Wed, 08 Jan 2025 15:50:17 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '438'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999619'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_1cc65e999b352a54a4c42eb8be543545
    http_version: HTTP/1.1
    status_code: 200
version: 1
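The YAML above is a vcrpy-style cassette: a recorded HTTP exchange that the test suite replays instead of calling the OpenAI API, with the authorization header filtered out of the recording. A minimal sketch of how such a cassette gets produced, assuming a pytest plugin such as pytest-recording supplies the `vcr` marker (the test name here is illustrative):

```python
# Sketch only: assumes pytest-recording (or pytest-vcr), which provides the
# `vcr` marker and writes cassettes like the YAML above on the first run.
import pytest

from crewai import Agent, Crew, Task


@pytest.mark.vcr(filter_headers=["authorization"])  # keep the API key out of the cassette
def test_say_hi():
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")
    task = Task(description="Just say hi.", expected_output="Your greeting.", agent=agent)
    crew = Crew(agents=[agent], tasks=[task])

    result = crew.kickoff()  # first run hits the API and records; later runs replay
    assert result.raw
```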
File diff suppressed because it is too large
@@ -1464,39 +1464,35 @@ def test_dont_set_agents_step_callback_if_already_set():

@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_function_calling_llm():
    from unittest.mock import patch

    from crewai import LLM
    from crewai.tools import tool

    llm = "gpt-4o"
    llm = LLM(model="gpt-4o-mini")

    @tool
    def learn_about_AI() -> str:
        """Useful for when you need to learn about AI to write an paragraph about it."""
        return "AI is a very broad field."
    def look_up_greeting() -> str:
        """Tool used to retrieve a greeting."""
        return "Howdy!"

    agent1 = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        tools=[learn_about_AI],
        role="Greeter",
        goal="Say hello.",
        backstory="You are a friendly greeter.",
        tools=[look_up_greeting],
        llm="gpt-4o-mini",
        function_calling_llm=llm,
    )

    essay = Task(
        description="Write and then review an small paragraph on AI until it's AMAZING",
        expected_output="The final paragraph.",
        description="Look up the greeting and say it.",
        expected_output="A greeting.",
        agent=agent1,
    )
    tasks = [essay]
    crew = Crew(agents=[agent1], tasks=tasks)

    with patch.object(
        instructor, "from_litellm", wraps=instructor.from_litellm
    ) as mock_from_litellm:
        crew.kickoff()
        mock_from_litellm.assert_called()
    crew = Crew(agents=[agent1], tasks=[essay])
    result = crew.kickoff()
    assert result.raw == "Howdy!"


@pytest.mark.vcr(filter_headers=["authorization"])
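Outside the test harness, the pattern this updated test exercises looks roughly like the sketch below: the agent's main `llm` does the reasoning while a separately configured `function_calling_llm` handles tool-call extraction. This simply restates the test's setup as a plain script; the final `print` is illustrative:

```python
# A minimal sketch of the pattern the updated test exercises: a separate
# model, passed as `function_calling_llm`, is used for tool calling.
from crewai import LLM, Agent, Crew, Task
from crewai.tools import tool


@tool
def look_up_greeting() -> str:
    """Tool used to retrieve a greeting."""
    return "Howdy!"


greeter = Agent(
    role="Greeter",
    goal="Say hello.",
    backstory="You are a friendly greeter.",
    tools=[look_up_greeting],
    llm="gpt-4o-mini",  # main reasoning model
    function_calling_llm=LLM(model="gpt-4o-mini"),  # model used for tool calling
)

task = Task(
    description="Look up the greeting and say it.",
    expected_output="A greeting.",
    agent=greeter,
)

result = Crew(agents=[greeter], tasks=[task]).kickoff()
print(result.raw)  # expected: "Howdy!"
```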
@@ -1,8 +1,6 @@
from unittest.mock import MagicMock

import pytest

from crewai import Agent, Task
from crewai import Agent
from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool

@@ -22,12 +20,9 @@ class InternalAgentTool(BaseAgentTool):
        ("Futel Official Infopoint\n", True),  # trailing newline
        ('"Futel Official Infopoint"', True),  # embedded quotes
        ("  FUTEL\nOFFICIAL  INFOPOINT  ", True),  # multiple whitespace and newline
        ("futel official infopoint", True),  # lowercase
        ("FUTEL OFFICIAL INFOPOINT", True),  # uppercase
        ("Non Existent Agent", False),  # non-existent agent
        (None, False),  # None agent name
    ],
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_tool_role_matching(role_name, should_match):
    """Test that agent tools can match roles regardless of case, whitespace, and special characters."""
    # Create test agent
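The parametrized cases above imply that role matching normalizes case, surrounding whitespace, newlines, and quotes before comparing. A hypothetical sketch of such normalization is shown below; the actual BaseAgentTool logic is not part of this diff, and `sanitize_role` is an illustrative name, not the library's:

```python
# Hypothetical normalization consistent with the parametrized cases above.
# Only the string path is sketched; a None role would simply never match.
def sanitize_role(role: str) -> str:
    """Collapse whitespace/newlines, drop quotes, and casefold for comparison."""
    return " ".join(role.replace('"', "").split()).casefold()


assert sanitize_role('"Futel Official Infopoint"') == sanitize_role("  FUTEL\nOFFICIAL  INFOPOINT  ")
```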
@@ -121,3 +121,113 @@ def test_tool_usage_render():
        "Tool Name: Random Number Generator\nTool Arguments: {'min_value': {'description': 'The minimum value of the range (inclusive)', 'type': 'int'}, 'max_value': {'description': 'The maximum value of the range (inclusive)', 'type': 'int'}}\nTool Description: Generates a random number within a specified range"
        in rendered
    )


def test_validate_tool_input_booleans_and_none():
    # Create a ToolUsage instance with mocks
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[],
        original_tools=[],
        tools_description="",
        tools_names="",
        task=MagicMock(),
        function_calling_llm=MagicMock(),
        agent=MagicMock(),
        action=MagicMock(),
    )

    # Input with booleans and None
    tool_input = '{"key1": True, "key2": False, "key3": None}'
    expected_arguments = {"key1": True, "key2": False, "key3": None}

    arguments = tool_usage._validate_tool_input(tool_input)
    assert arguments == expected_arguments


def test_validate_tool_input_mixed_types():
    # Create a ToolUsage instance with mocks
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[],
        original_tools=[],
        tools_description="",
        tools_names="",
        task=MagicMock(),
        function_calling_llm=MagicMock(),
        agent=MagicMock(),
        action=MagicMock(),
    )

    # Input with mixed types
    tool_input = '{"number": 123, "text": "Some text", "flag": True}'
    expected_arguments = {"number": 123, "text": "Some text", "flag": True}

    arguments = tool_usage._validate_tool_input(tool_input)
    assert arguments == expected_arguments


def test_validate_tool_input_single_quotes():
    # Create a ToolUsage instance with mocks
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[],
        original_tools=[],
        tools_description="",
        tools_names="",
        task=MagicMock(),
        function_calling_llm=MagicMock(),
        agent=MagicMock(),
        action=MagicMock(),
    )

    # Input with single quotes instead of double quotes
    tool_input = "{'key': 'value', 'flag': True}"
    expected_arguments = {"key": "value", "flag": True}

    arguments = tool_usage._validate_tool_input(tool_input)
    assert arguments == expected_arguments


def test_validate_tool_input_invalid_json_repairable():
    # Create a ToolUsage instance with mocks
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[],
        original_tools=[],
        tools_description="",
        tools_names="",
        task=MagicMock(),
        function_calling_llm=MagicMock(),
        agent=MagicMock(),
        action=MagicMock(),
    )

    # Invalid JSON input that can be repaired
    tool_input = '{"key": "value", "list": [1, 2, 3,]}'
    expected_arguments = {"key": "value", "list": [1, 2, 3]}

    arguments = tool_usage._validate_tool_input(tool_input)
    assert arguments == expected_arguments


def test_validate_tool_input_with_special_characters():
    # Create a ToolUsage instance with mocks
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[],
        original_tools=[],
        tools_description="",
        tools_names="",
        task=MagicMock(),
        function_calling_llm=MagicMock(),
        agent=MagicMock(),
        action=MagicMock(),
    )

    # Input with special characters
    tool_input = '{"message": "Hello, world! \u263A", "valid": True}'
    expected_arguments = {"message": "Hello, world! ☺", "valid": True}

    arguments = tool_usage._validate_tool_input(tool_input)
    assert arguments == expected_arguments
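Taken together, these tests pin down a tolerant parsing path for tool input: strict JSON first, then Python-style literals (`True`/`False`/`None`, single quotes), then repair of almost-JSON such as trailing commas. A hedged sketch of a cascade consistent with that behavior follows; the real `ToolUsage._validate_tool_input` may differ, and the `json-repair` dependency and `parse_tool_input` name are assumptions for illustration:

```python
# Hedged sketch of a parsing cascade consistent with the tests above; not the
# actual crewAI implementation. Assumes the `json-repair` package is installed.
import ast
import json

from json_repair import repair_json


def parse_tool_input(tool_input: str) -> dict:
    try:
        return json.loads(tool_input)  # strict JSON first
    except json.JSONDecodeError:
        pass
    try:
        # Python literals: True/False/None, single quotes, trailing commas
        return ast.literal_eval(tool_input)
    except (ValueError, SyntaxError):
        pass
    # Last resort: repair near-JSON, then parse the repaired string
    return json.loads(repair_json(tool_input))


assert parse_tool_input('{"key": "value", "list": [1, 2, 3,]}') == {"key": "value", "list": [1, 2, 3]}
assert parse_tool_input("{'key': 'value', 'flag': True}") == {"key": "value", "flag": True}
```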