Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-09 08:08:32 +00:00)

Commit: prepping new version
@@ -220,7 +220,7 @@ These methods provide flexibility in how you manage and execute tasks within you
 ### Replaying from specific task:
 You can now replay from a specific task using our cli command replay.

-The replay_from_tasks feature in CrewAI allows you to replay from a specific task using the command-line interface (CLI). By running the command `crewai replay -t <task_id>`, you can specify the `task_id` for the replay process.
+The replay feature in CrewAI allows you to replay from a specific task using the command-line interface (CLI). By running the command `crewai replay -t <task_id>`, you can specify the `task_id` for the replay process.

 Kickoffs will now save the latest kickoffs returned task outputs locally for you to be able to replay from.

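As a quick illustration of the documented command (a sketch, not part of the diff itself; `<task_id>` stays a placeholder for an id taken from the latest kickoff's locally saved task outputs):

```shell
# Replay the latest kickoff starting from a specific task.
# <task_id> is a placeholder: substitute the id of a task output saved by the last kickoff.
crewai replay -t <task_id>
```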
@@ -36,14 +36,14 @@ To replay from a task programmatically, use the following steps:
 2. Execute the replay command within a try-except block to handle potential errors.

 ```python
-def replay_from_task():
+def replay():
     """
     Replay the crew execution from a specific task.
     """
     task_id = '<task_id>'
     inputs = {"topic": "CrewAI Training"} # this is optional, you can pass in the inputs you want to replay otherwise uses the previous kickoffs inputs
     try:
-        YourCrewName_Crew().crew().replay_from_task(task_id=task_id, inputs=inputs)
+        YourCrewName_Crew().crew().replay(task_id=task_id, inputs=inputs)

     except Exception as e:
         raise Exception(f"An error occurred while replaying the crew: {e}")
poetry.lock (generated): 688 changed lines; file diff suppressed because it is too large.
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "crewai"
-version = "0.41.0"
+version = "0.41.1"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 authors = ["Joao Moura <joao@crewai.com>"]
 readme = "README.md"
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

 [tool.poetry.dependencies]
 python = ">=3.10,<=3.13"
-crewai = { extras = ["tools"], version = "^0.41.0" }
+crewai = { extras = ["tools"], version = "^0.41.1" }

 [tool.poetry.scripts]
 {{folder_name}} = "{{folder_name}}.main:run"
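One possible way to pick up the bumped dependency in an already-generated project, assuming standard Poetry tooling (these commands are not prescribed by the diff):

```shell
# Assumes a project created from the template above, with crewai pinned under
# [tool.poetry.dependencies]; run from the project root after editing the
# version constraint to "^0.41.1" in pyproject.toml.
poetry update crewai   # re-resolve and update poetry.lock for the new constraint
poetry install         # make sure the local environment matches the lock file
```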
tests/cassettes/test_replay_setup_context.yaml (new file)
@@ -0,0 +1,163 @@
VCR cassette with a single recorded interaction (summarized):
- request: POST https://api.openai.com/v1/chat/completions from the OpenAI/Python 1.36.0 client (CPython 3.11.7, arm64, MacOS). The JSON body carries the test_agent prompt ("You are test_agent. Test Description ... Current Task: Test Task ... This is the expect criteria for your final answer: Say Hi to John ... This is the context you're working with: context raw output ...") with "model": "gpt-4o", "stream": true, "stop": ["\nObservation"], "temperature": 0.7.
- response: 200 OK, a text/event-stream of chat.completion.chunk events from gpt-4o-2024-05-13 whose concatenated content is "Thought: I now can give a great answer\nFinal Answer: Hi John", followed by the standard OpenAI/Cloudflare response headers.
version: 1
tests/cassettes/test_replay_with_context.yaml (new file)
@@ -0,0 +1,159 @@
VCR cassette with a single recorded interaction (summarized):
- request: POST https://api.openai.com/v1/chat/completions from the OpenAI/Python 1.36.0 client (CPython 3.11.7, arm64, MacOS). The JSON body carries the test_agent prompt ("You are test_agent. Test Description ... Current Task: Test Task ... This is the expect criteria for your final answer: Say Hi ... This is the context you're working with: context raw output ...") with "model": "gpt-4o", "stream": true, "stop": ["\nObservation"], "temperature": 0.7.
- response: 200 OK, a text/event-stream of chat.completion.chunk events from gpt-4o-2024-05-13 whose concatenated content is "Thought: I now can give a great answer\nFinal Answer: Hi", followed by the standard OpenAI/Cloudflare response headers.
version: 1
@@ -1917,13 +1917,13 @@ def test_replay_feature():
     )

     crew.kickoff()
-    crew.replay_from_task(str(write.id))
+    crew.replay(str(write.id))
     # Ensure context was passed correctly
     assert mock_execute_task.call_count == 3


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_replay_from_task_error():
+def test_crew_replay_error():
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -1936,7 +1936,7 @@ def test_crew_replay_from_task_error():
     )

     with pytest.raises(TypeError) as e:
-        crew.replay_from_task() # type: ignore purposefully throwing err
+        crew.replay() # type: ignore purposefully throwing err
         assert "task_id is required" in str(e)


@@ -2071,14 +2071,14 @@ def test_replay_task_with_context():
     with patch.object(Task, "execute_sync") as mock_replay_task:
         mock_replay_task.return_value = mock_task_output4

-        replayed_output = crew.replay_from_task(str(task4.id))
+        replayed_output = crew.replay(str(task4.id))
         assert replayed_output.raw == "Presentation on AI advancements..."

     db_handler.reset()


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_replay_from_task_with_context():
+def test_replay_with_context():
     agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
     task1 = Task(
         description="Context Task", expected_output="Say Task Output", agent=agent
@@ -2130,7 +2130,7 @@ def test_replay_from_task_with_context():
             },
         ],
     ):
-        crew.replay_from_task(str(task2.id))
+        crew.replay(str(task2.id))

     assert crew.tasks[1].context[0].output.raw == "context raw output"

@@ -2192,7 +2192,7 @@ def test_replay_with_invalid_task_id():
         ValueError,
         match="Task with id bf5b09c9-69bd-4eb8-be12-f9e5bae31c2d not found in the crew's tasks.",
     ):
-        crew.replay_from_task("bf5b09c9-69bd-4eb8-be12-f9e5bae31c2d")
+        crew.replay("bf5b09c9-69bd-4eb8-be12-f9e5bae31c2d")


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -2251,13 +2251,13 @@ def test_replay_interpolates_inputs_properly(mock_interpolate_inputs):
             },
         ],
     ):
-        crew.replay_from_task(str(task2.id))
+        crew.replay(str(task2.id))
         assert crew._inputs == {"name": "John"}
         assert mock_interpolate_inputs.call_count == 2


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_replay_from_task_setup_context():
+def test_replay_setup_context():
     agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
     task1 = Task(description="Context Task", expected_output="Say {name}", agent=agent)
     task2 = Task(
@@ -2306,7 +2306,7 @@ def test_replay_from_task_setup_context():
             },
         ],
     ):
-        crew.replay_from_task(str(task2.id))
+        crew.replay(str(task2.id))

     # Check if the first task's output was set correctly
     assert crew.tasks[0].output is not None
@@ -1,4 +1,5 @@
 from unittest.mock import patch
+from crewai.tasks.task_output import TaskOutput

 import pytest

@@ -31,10 +32,15 @@ class TestCrewPlanner:

     def test_handle_crew_planning(self, crew_planner):
         with patch.object(Task, "execute_sync") as execute:
-            execute.return_value = PlannerTaskPydanticOutput(
-                list_of_plans_per_task=["Plan 1", "Plan 2", "Plan 3"]
+            execute.return_value = TaskOutput(
+                description="Description",
+                agent="agent",
+                pydantic=PlannerTaskPydanticOutput(
+                    list_of_plans_per_task=["Plan 1", "Plan 2", "Plan 3"]
+                ),
             )
             result = crew_planner._handle_crew_planning()

             assert isinstance(result, PlannerTaskPydanticOutput)
             assert len(result.list_of_plans_per_task) == len(crew_planner.tasks)
             execute.assert_called_once()