Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-18 13:28:31 +00:00

Compare commits: 8 commits (lg-update-...feature/hu)

| Author | SHA1 | Date |
|---|---|---|
| | 02ea8e31dc | |
| | a07c255e06 | |
| | 1774fe8561 | |
| | 5cc8c9c2b3 | |
| | 3f9c6d4ce9 | |
| | 72256f6c06 | |
| | 27c12e2ea3 | |
| | c8f5bdc19f | |
@@ -23,6 +23,7 @@ Tasks in CrewAI can be designed to require collaboration between agents. For exa
| **Output Pydantic** *(optional)* | Takes a pydantic model and returns the output as a pydantic object. **The agent's LLM needs to use an OpenAI-compatible client; this could be Ollama, for example, as long as it goes through the OpenAI wrapper.** |
| **Output File** *(optional)* | Takes a file path and saves the output of the task to it. |
| **Callback** *(optional)* | A function to be executed after the task is completed. |
+ | **Human Input** *(optional)* | Indicates whether the agent should ask for human feedback at the end of the task. |

## Creating a Task

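As a quick illustration of the new **Human Input** attribute, here is a minimal sketch of a task that requests human review; the agent definition and wording below are illustrative placeholders, not taken from this change set:

```python
from crewai import Agent, Task

# Illustrative agent; any configured Agent works here.
researcher = Agent(
    role="Senior Research Analyst",
    goal="Uncover the latest AI developments",
    backstory="An analyst used here only to demonstrate the human input flag.",
)

# human_input=True makes the agent pause after producing its answer and
# ask a human for feedback before the task result is finalized.
report_task = Task(
    description="Summarize the most notable AI advancements of 2024.",
    expected_output="A short bullet-point summary.",
    agent=researcher,
    human_input=True,
)
```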
@@ -9,7 +9,7 @@ Human input plays a pivotal role in several agent execution scenarios, enabling

## Using Human Input with CrewAI

- Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, it's important to clarify that the integration of such tools should align with the actual capabilities and configurations defined within your `Agent` class setup.
+ Incorporating human input with CrewAI is straightforward, enhancing the agent's ability to make informed decisions. While the documentation previously mentioned using a "LangChain Tool" and a specific "DuckDuckGoSearchRun" tool from `langchain_community.tools`, the integration of such tools should align with the capabilities and configuration defined in your `Agent` class setup. Human input is now enabled by a simple flag on the task itself.

### Example:

@@ -23,14 +23,10 @@ import os
from crewai import Agent, Task, Crew
from crewai_tools import SerperDevTool
-
- from langchain.agents import load_tools

os.environ["SERPER_API_KEY"] = "Your Key" # serper.dev API key
os.environ["OPENAI_API_KEY"] = "Your Key"

- # Loading Human Tools
- human_tools = load_tools(["human"])
+ # Loading Tools
search_tool = SerperDevTool()

# Define your agents with roles, goals, and tools
@@ -44,7 +40,7 @@ researcher = Agent(
  ),
  verbose=True,
  allow_delegation=False,
- tools=[search_tool]+human_tools # Passing human tools to the agent
+ tools=[search_tool]
)
writer = Agent(
  role='Tech Content Strategist',
@@ -67,6 +63,7 @@ task1 = Task(
  ),
  expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
  agent=researcher,
+ human_input=True, # setting the flag on for human input in this task
)

task2 = Task(
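The surrounding example presumably wires these agents and tasks into a crew before running them; the following is a sketch of that step under the standard `Crew(...).kickoff()` flow (the assembly code itself is not part of this hunk):

```python
from crewai import Crew

# Assemble the crew from the example's agents and tasks and run it.
# When the researcher produces its final answer for task1, it pauses and
# prompts for human feedback because human_input=True is set on that task,
# then re-evaluates its answer with that feedback.
crew = Crew(
    agents=[researcher, writer],
    tasks=[task1, task2],
)

result = crew.kickoff()
print(result)
```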
@@ -18,6 +18,7 @@ from crewai.utilities import I18N

class CrewAgentExecutor(AgentExecutor):
    _i18n: I18N = I18N()
+   should_ask_for_human_input: bool = False
    llm: Any = None
    iterations: int = 0
    task: Any = None
@@ -54,6 +55,9 @@ class CrewAgentExecutor(AgentExecutor):
            [tool.name for tool in self.tools], excluded_colors=["green", "red"]
        )
        intermediate_steps: List[Tuple[AgentAction, str]] = []
+       # Allowing human input given task setting
+       if self.task.human_input:
+           self.should_ask_for_human_input = True
        # Let's start tracking the number of iterations and time elapsed
        self.iterations = 0
        time_elapsed = 0.0
@@ -169,6 +173,22 @@ class CrewAgentExecutor(AgentExecutor):

            # If the tool chosen is the finishing tool, then we end and return.
            if isinstance(output, AgentFinish):
+               if self.should_ask_for_human_input:
+                   # Making sure we only ask for it once, so disabling for the next thought loop
+                   self.should_ask_for_human_input = False
+                   human_feedback = self._ask_human_input(output.return_values["output"])
+                   action = AgentAction(
+                       tool="Human Input", tool_input=human_feedback, log=output.log
+                   )
+                   yield AgentStep(
+                       action=action,
+                       observation=self._i18n.slice("human_feedback").format(
+                           human_feedback=human_feedback
+                       ),
+                   )
+                   return
+
+               else:
                    yield output
                    return

@@ -203,3 +223,9 @@ class CrewAgentExecutor(AgentExecutor):
                tools=", ".join([tool.name for tool in self.tools]),
            )
            yield AgentStep(action=agent_action, observation=observation)
+
+    def _ask_human_input(self, final_answer: dict) -> str:
+        """Get human input."""
+        return input(
+            self._i18n.slice("getting_input").format(final_answer=final_answer)
+        )
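Because `_ask_human_input` ultimately calls the built-in `input()`, feedback can also be supplied non-interactively, for example in scripts or CI runs; the following is a small sketch, not part of this change set, reusing the crew from the documentation example above:

```python
from unittest.mock import patch

# Any execution path that reaches CrewAgentExecutor._ask_human_input will
# receive this canned feedback instead of blocking on stdin.
with patch("builtins.input", return_value="Looks good, no changes needed"):
    result = crew.kickoff()  # or agent.execute_task(task) for a single task
```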
@@ -70,6 +70,10 @@ class Task(BaseModel):
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
+   human_input: Optional[bool] = Field(
+       description="Whether the task should have a human review the final answer of the agent",
+       default=False,
+   )

    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
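A minimal sketch of the new field's behavior, mirroring the constructor arguments used by the test added later in this change set; it only exercises API that the diff itself introduces or already shows:

```python
from crewai import Agent, Task

agent = Agent(config={"role": "test role", "goal": "test goal", "backstory": "test backstory"})

# human_input defaults to False, so existing Task definitions are unaffected.
task = Task(agent=agent, description="Say the word: Hi", expected_output="The word: Hi")
assert task.human_input is False

# Opting in is just a keyword argument.
reviewed_task = Task(
    agent=agent,
    description="Say the word: Hi",
    expected_output="The word: Hi",
    human_input=True,
)
assert reviewed_task.human_input is True
```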
@@ -15,7 +15,9 @@
    "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
    "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
    "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
-   "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary."
+   "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
+   "human_feedback": "You got human feedback on your work, re-avaluate it and give a new Final Answer when ready.\n {human_feedback}",
+   "getting_input": "This is the agent final answer: {final_answer}\nPlease provide a feedback: "
  },
  "errors": {
    "unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",
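A small sketch of how the two new prompt slices are resolved at runtime, based on the `I18N` usage visible in the executor changes above:

```python
from crewai.utilities import I18N

i18n = I18N()

# Prompt shown on stdin when the executor asks for feedback on the final answer.
prompt = i18n.slice("getting_input").format(final_answer="Hi")

# Observation fed back into the agent's reasoning loop after feedback is given.
observation = i18n.slice("human_feedback").format(human_feedback="Hello")

print(prompt)
print(observation)
```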
@@ -680,3 +680,30 @@ def test_agent_definition_based_on_dict():
    assert agent.backstory == "test backstory"
    assert agent.verbose == True
    assert agent.tools == []
+
+
+# test for human input
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_human_input():
+    from unittest.mock import patch
+
+    config = {
+        "role": "test role",
+        "goal": "test goal",
+        "backstory": "test backstory",
+    }
+
+    agent = Agent(config=config)
+
+    task = Task(
+        agent=agent,
+        description="Say the word: Hi",
+        expected_output="The word: Hi",
+        human_input=True,
+    )
+
+    with patch.object(CrewAgentExecutor, "_ask_human_input") as mock_human_input:
+        mock_human_input.return_value = "Hello"
+        output = agent.execute_task(task)
+        mock_human_input.assert_called_once()
+        assert output == "Hello"
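To replay this scenario locally, the new test can be selected by name; the following is a sketch, assuming a VCR pytest plugin (such as pytest-vcr) provides the `@pytest.mark.vcr` marker and the cassette below sits in `tests/cassettes/`:

```python
import pytest

# Runs only the new test; the recorded cassette below is replayed instead of
# making live calls to api.openai.com.
pytest.main(["-k", "test_agent_human_input"])
```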
tests/cassettes/test_agent_human_input.yaml (new file, 381 lines)
@@ -0,0 +1,381 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
      personal goal is: test goalTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: my best complete final answer to the task.\nYour final answer must be
      the great and the most complete as possible, it must be outcome described.\n\nI
      MUST use these formats, my job depends on it!\n\nThought: \n\nCurrent Task:
      Say the word: Hi\n\nThis is the expect criteria for your final answer: The word:
      Hi \n you MUST return the actual complete content as the final answer, not a
      summary.\n\nBegin! This is VERY important to you, use the tools available and
      give your best Final Answer, your job depends on it!\n\nThought: \n"}], "model":
      "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '871'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.13.3
      x-stainless-arch:
      - other:amd64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Windows
      x-stainless-package-version:
      - 1.13.3
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.10.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"\n\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        \n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Hi"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOpci8MKSrJuksy3er9xP4VPtt3","object":"chat.completion.chunk","created":1711975799,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 86d8b3c50a3900fe-GRU
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream
      Date:
      - Mon, 01 Apr 2024 12:49:59 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=SKbA6Tkvgm70ubrePPCA0E3p7jCx6I0hvl21nUYJZfs-1711975799-1.0.1.1-vpC0CATlcE3u.X_XqVu7m5uBcvIfSZLza9_rT63hkxowZaPpgUjsvUXJODkXHzs99U.JWBrunylSpl3oOOvOPA;
        path=/; expires=Mon, 01-Apr-24 13:19:59 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=uA0B4Boqw_bNZvnzP5HAJVToe90yw8F9rVggJtXKT_4-1711975799706-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '240'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '299804'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 39ms
      x-request-id:
      - req_1612b410e539e01c9e167a201fe5932d
    status:
      code: 200
      message: OK
- request:
    body: '{"messages": [{"role": "user", "content": "You are test role. test backstory\nYour
      personal goal is: test goalTo give my best complete final answer to the task
      use the exact following format:\n\nThought: I now can give a great answer\nFinal
      Answer: my best complete final answer to the task.\nYour final answer must be
      the great and the most complete as possible, it must be outcome described.\n\nI
      MUST use these formats, my job depends on it!\n\nThought: \n\nCurrent Task:
      Say the word: Hi\n\nThis is the expect criteria for your final answer: The word:
      Hi \n you MUST return the actual complete content as the final answer, not a
      summary.\n\nBegin! This is VERY important to you, use the tools available and
      give your best Final Answer, your job depends on it!\n\nThought: \nI now can
      give a great answer\n\nFinal Answer: \nHi\nObservation: You got human feedback
      on your work, re-avaluate it and give a new Final Answer when ready.\n Hello\n"}],
      "model": "gpt-4", "n": 1, "stop": ["\nObservation"], "stream": true, "temperature":
      0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '1038'
      content-type:
      - application/json
      cookie:
      - __cf_bm=SKbA6Tkvgm70ubrePPCA0E3p7jCx6I0hvl21nUYJZfs-1711975799-1.0.1.1-vpC0CATlcE3u.X_XqVu7m5uBcvIfSZLza9_rT63hkxowZaPpgUjsvUXJODkXHzs99U.JWBrunylSpl3oOOvOPA;
        _cfuvid=uA0B4Boqw_bNZvnzP5HAJVToe90yw8F9rVggJtXKT_4-1711975799706-0.0.1.1-604800000
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.13.3
      x-stainless-arch:
      - other:amd64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - Windows
      x-stainless-package-version:
      - 1.13.3
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.10.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        \n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"The"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        feedback"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        received"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        indicates"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        my"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        initial"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        was"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        incorrect"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        Evalu"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"ating"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        it"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":","},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        it"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        seems"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        I"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        need"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        to"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        change"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        my"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        response"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":".\n\n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
        \n"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]}


        data: {"id":"chatcmpl-99BOq7DS8ZMoSHlojbgXCOiT2imcY","object":"chat.completion.chunk","created":1711975800,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}


        data: [DONE]


        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 86d8b3cfaf4200fe-GRU
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream
      Date:
      - Mon, 01 Apr 2024 12:50:00 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400
      openai-model:
      - gpt-4-0613
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '269'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '300000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '299763'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 47ms
      x-request-id:
      - req_58f19f788ea39618601b15c4a9ea5bdd
    status:
      code: 200
      message: OK
version: 1