Merge branch 'main' into devin/1735621211-fix-llm-parameter-case-normalization

Rip&Tear
2025-01-05 16:15:15 +08:00
committed by GitHub
55 changed files with 2677 additions and 1045 deletions

View File

@@ -1445,44 +1445,43 @@ def test_llm_call_with_all_attributes():
 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_agent_with_ollama_gemma():
+def test_agent_with_ollama_llama3():
     agent = Agent(
         role="test role",
         goal="test goal",
         backstory="test backstory",
-        llm=LLM(
-            model="ollama/gemma2:latest",
-            base_url="http://localhost:8080",
-        ),
+        llm=LLM(model="ollama/llama3.2:3b", base_url="http://localhost:11434"),
     )
     assert isinstance(agent.llm, LLM)
-    assert agent.llm.model == "ollama/gemma2:latest"
-    assert agent.llm.base_url == "http://localhost:8080"
+    assert agent.llm.model == "ollama/llama3.2:3b"
+    assert agent.llm.base_url == "http://localhost:11434"
-    task = "Respond in 20 words. Who are you?"
+    task = "Respond in 20 words. Which model are you?"
     response = agent.llm.call([{"role": "user", "content": task}])
     assert response
     assert len(response.split()) <= 25  # Allow a little flexibility in word count
-    assert "Gemma" in response or "AI" in response or "language model" in response
+    assert "Llama3" in response or "AI" in response or "language model" in response


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_llm_call_with_ollama_gemma():
+def test_llm_call_with_ollama_llama3():
     llm = LLM(
-        model="ollama/gemma2:latest",
-        base_url="http://localhost:8080",
+        model="ollama/llama3.2:3b",
+        base_url="http://localhost:11434",
         temperature=0.7,
         max_tokens=30,
     )
-    messages = [{"role": "user", "content": "Respond in 20 words. Who are you?"}]
+    messages = [
+        {"role": "user", "content": "Respond in 20 words. Which model are you?"}
+    ]
     response = llm.call(messages)
     assert response
     assert len(response.split()) <= 25  # Allow a little flexibility in word count
-    assert "Gemma" in response or "AI" in response or "language model" in response
+    assert "Llama3" in response or "AI" in response or "language model" in response


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1578,7 +1577,7 @@ def test_agent_execute_task_with_ollama():
         role="test role",
         goal="test goal",
         backstory="test backstory",
-        llm=LLM(model="ollama/gemma2:latest", base_url="http://localhost:8080"),
+        llm=LLM(model="ollama/llama3.2:3b", base_url="http://localhost:11434"),
     )
     task = Task(

View File

@@ -7,7 +7,7 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.tools.base_tool import BaseTool


-class TestAgent(BaseAgent):
+class MockAgent(BaseAgent):
     def execute_task(
         self,
         task: Any,
@@ -29,7 +29,7 @@ class TestAgent(BaseAgent):
 def test_key():
-    agent = TestAgent(
+    agent = MockAgent(
         role="test role",
         goal="test goal",
         backstory="test backstory",

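The rename from `TestAgent` to `MockAgent` sidesteps pytest's collection convention: classes whose names start with `Test` are collected as test classes, and pytest warns when such a class defines an `__init__`. A minimal sketch of that behaviour, using hypothetical class names unrelated to the diff above:

```python
# Hypothetical helpers, only to illustrate pytest's name-based collection rule.
class TestHelper:  # name starts with "Test": pytest tries to collect it and
    def __init__(self):  # warns "cannot collect test class ... __init__ constructor"
        self.value = 1


class MockHelper:  # not collected: the name does not start with "Test"
    def __init__(self):
        self.value = 1


def test_mock_helper():
    assert MockHelper().value == 1
```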
View File

@@ -1,42 +1,6 @@
interactions:
- request:
body: !!binary |
CrcCCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSjgIKEgoQY3Jld2FpLnRl
bGVtZXRyeRJoChA/Q8UW5bidCRtKvri5fOaNEgh5qLzvLvZJkioQVG9vbCBVc2FnZSBFcnJvcjAB
OYjFVQr1TPgXQXCXhwr1TPgXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuNjEuMHoCGAGFAQABAAAS
jQEKEChQTWQ07t26ELkZmP5RresSCHEivRGBpsP7KgpUb29sIFVzYWdlMAE5sKkbC/VM+BdB8MIc
C/VM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42MS4wShkKCXRvb2xfbmFtZRIMCgpkdW1teV90
b29sSg4KCGF0dGVtcHRzEgIYAXoCGAGFAQABAAA=
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '314'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Tue, 24 Sep 2024 21:57:54 GMT
status:
code: 200
message: OK
- request:
body: '{"model": "gemma2:latest", "prompt": "### System:\nYou are test role. test
body: '{"model": "llama3.2:3b", "prompt": "### System:\nYou are test role. test
backstory\nYour personal goal is: test goal\nTo give my best complete final
answer to the task use the exact following format:\n\nThought: I now can give
a great answer\nFinal Answer: Your final answer must be the great and the most
@@ -46,7 +10,7 @@ interactions:
explanation of AI\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:\n\n",
"options": {}, "stream": false}'
"options": {"stop": ["\nObservation:"]}, "stream": false}'
headers:
Accept:
- '*/*'
@@ -55,26 +19,26 @@ interactions:
Connection:
- keep-alive
Content-Length:
- '815'
- '839'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
- python-requests/2.32.3
method: POST
uri: http://localhost:8080/api/generate
uri: http://localhost:11434/api/generate
response:
body:
string: '{"model":"gemma2:latest","created_at":"2024-09-24T21:57:55.835715Z","response":"Thought:
I can explain AI in one sentence. \n\nFinal Answer: Artificial intelligence
(AI) is the ability of computer systems to perform tasks that typically require
human intelligence, such as learning, problem-solving, and decision-making. \n","done":true,"done_reason":"stop","context":[106,1645,108,6176,1479,235292,108,2045,708,2121,4731,235265,2121,135147,108,6922,3749,6789,603,235292,2121,6789,108,1469,2734,970,1963,3407,2048,3448,577,573,6911,1281,573,5463,2412,5920,235292,109,65366,235292,590,1490,798,2734,476,1775,3448,108,11263,10358,235292,3883,2048,3448,2004,614,573,1775,578,573,1546,3407,685,3077,235269,665,2004,614,17526,6547,235265,109,235285,44472,1281,1450,32808,235269,970,3356,12014,611,665,235341,109,6176,4926,235292,109,6846,12297,235292,36576,1212,16481,603,575,974,13060,109,1596,603,573,5246,12830,604,861,2048,3448,235292,586,974,235290,47366,15844,576,16481,108,4747,44472,2203,573,5579,3407,3381,685,573,2048,3448,235269,780,476,13367,235265,109,12694,235341,1417,603,50471,2845,577,692,235269,1281,573,8112,2506,578,2734,861,1963,14124,10358,235269,861,3356,12014,611,665,235341,109,65366,235292,109,107,108,106,2516,108,65366,235292,590,798,10200,16481,575,974,13060,235265,235248,109,11263,10358,235292,42456,17273,591,11716,235275,603,573,7374,576,6875,5188,577,3114,13333,674,15976,2817,3515,17273,235269,1582,685,6044,235269,3210,235290,60495,235269,578,4530,235290,14577,235265,139,108],"total_duration":3370959792,"load_duration":20611750,"prompt_eval_count":173,"prompt_eval_duration":688036000,"eval_count":51,"eval_duration":2660291000}'
string: '{"model":"llama3.2:3b","created_at":"2025-01-02T20:05:52.24992Z","response":"Final
Answer: Artificial Intelligence (AI) refers to the development of computer
systems capable of performing tasks that typically require human intelligence,
such as learning, problem-solving, decision-making, and perception.","done":true,"done_reason":"stop","context":[128006,9125,128007,271,38766,1303,33025,2696,25,6790,220,2366,18,271,128009,128006,882,128007,271,14711,744,512,2675,527,1296,3560,13,1296,93371,198,7927,4443,5915,374,25,1296,5915,198,1271,3041,856,1888,4686,1620,4320,311,279,3465,1005,279,4839,2768,3645,1473,85269,25,358,1457,649,3041,264,2294,4320,198,19918,22559,25,4718,1620,4320,2011,387,279,2294,323,279,1455,4686,439,3284,11,433,2011,387,15632,7633,382,40,28832,1005,1521,20447,11,856,2683,14117,389,433,2268,14711,2724,1473,5520,5546,25,83017,1148,15592,374,304,832,11914,271,2028,374,279,1755,13186,369,701,1620,4320,25,362,832,1355,18886,16540,315,15592,198,9514,28832,471,279,5150,4686,2262,439,279,1620,4320,11,539,264,12399,382,11382,0,1115,374,48174,3062,311,499,11,1005,279,7526,2561,323,3041,701,1888,13321,22559,11,701,2683,14117,389,433,2268,85269,1473,128009,128006,78191,128007,271,19918,22559,25,59294,22107,320,15836,8,19813,311,279,4500,315,6500,6067,13171,315,16785,9256,430,11383,1397,3823,11478,11,1778,439,6975,11,3575,99246,11,5597,28846,11,323,21063,13],"total_duration":1461909875,"load_duration":39886208,"prompt_eval_count":181,"prompt_eval_duration":701000000,"eval_count":39,"eval_duration":719000000}'
headers:
Content-Length:
- '1662'
- '1537'
Content-Type:
- application/json; charset=utf-8
Date:
- Tue, 24 Sep 2024 21:57:55 GMT
- Thu, 02 Jan 2025 20:05:52 GMT
status:
code: 200
message: OK

View File

@@ -1,397 +0,0 @@
interactions:
- request:
body: !!binary |
CumTAQokCiIKDHNlcnZpY2UubmFtZRISChBjcmV3QUktdGVsZW1ldHJ5Er+TAQoSChBjcmV3YWku
dGVsZW1ldHJ5EqoHChDvqD2QZooz9BkEwtbWjp4OEgjxh72KACHvZSoMQ3JldyBDcmVhdGVkMAE5
qMhNnvBM+BdBcO9PnvBM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42MS4wShoKDnB5dGhvbl92
ZXJzaW9uEggKBjMuMTEuN0ouCghjcmV3X2tleRIiCiBkNTUxMTNiZTRhYTQxYmE2NDNkMzI2MDQy
YjJmMDNmMUoxCgdjcmV3X2lkEiYKJGY4YTA1OTA1LTk0OGEtNDQ0YS04NmJmLTJiNTNiNDkyYjgy
MkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRICEABKGgoUY3Jl
d19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxICGAFKxwIKC2Ny
ZXdfYWdlbnRzErcCCrQCW3sia2V5IjogImUxNDhlNTMyMDI5MzQ5OWY4Y2ViZWE4MjZlNzI1ODJi
IiwgImlkIjogIjg1MGJjNWUwLTk4NTctNDhkOC1iNWZlLTJmZjk2OWExYTU3YiIsICJyb2xlIjog
InRlc3Qgcm9sZSIsICJ2ZXJib3NlPyI6IHRydWUsICJtYXhfaXRlciI6IDQsICJtYXhfcnBtIjog
MTAsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00byIsICJkZWxlZ2F0
aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1h
eF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1KkAIKCmNyZXdfdGFza3MSgQIK
/gFbeyJrZXkiOiAiNGEzMWI4NTEzM2EzYTI5NGM2ODUzZGE3NTdkNGJhZTciLCAiaWQiOiAiOTc1
ZDgwMjItMWJkMS00NjBlLTg2NmEtYjJmZGNiYjA4ZDliIiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBm
YWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0ZXN0IHJvbGUiLCAi
YWdlbnRfa2V5IjogImUxNDhlNTMyMDI5MzQ5OWY4Y2ViZWE4MjZlNzI1ODJiIiwgInRvb2xzX25h
bWVzIjogWyJnZXRfZmluYWxfYW5zd2VyIl19XXoCGAGFAQABAAASjgIKEP9UYSAOFQbZquSppN1j
IeUSCAgZmXUoJKFmKgxUYXNrIENyZWF0ZWQwATloPV+e8Ez4F0GYsl+e8Ez4F0ouCghjcmV3X2tl
eRIiCiBkNTUxMTNiZTRhYTQxYmE2NDNkMzI2MDQyYjJmMDNmMUoxCgdjcmV3X2lkEiYKJGY4YTA1
OTA1LTk0OGEtNDQ0YS04NmJmLTJiNTNiNDkyYjgyMkouCgh0YXNrX2tleRIiCiA0YTMxYjg1MTMz
YTNhMjk0YzY4NTNkYTc1N2Q0YmFlN0oxCgd0YXNrX2lkEiYKJDk3NWQ4MDIyLTFiZDEtNDYwZS04
NjZhLWIyZmRjYmIwOGQ5YnoCGAGFAQABAAASkwEKEEfiywgqgiUXE3KoUbrnHDQSCGmv+iM7Wc1Z
KgpUb29sIFVzYWdlMAE5kOybnvBM+BdBIM+cnvBM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42
MS4wSh8KCXRvb2xfbmFtZRISChBnZXRfZmluYWxfYW5zd2VySg4KCGF0dGVtcHRzEgIYAXoCGAGF
AQABAAASkwEKEH7AHXpfmvwIkA45HB8YyY0SCAFRC+uJpsEZKgpUb29sIFVzYWdlMAE56PLdnvBM
+BdBYFbfnvBM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42MS4wSh8KCXRvb2xfbmFtZRISChBn
ZXRfZmluYWxfYW5zd2VySg4KCGF0dGVtcHRzEgIYAXoCGAGFAQABAAASkwEKEIDKKEbYU4lcJF+a
WsAVZwESCI+/La7oL86MKgpUb29sIFVzYWdlMAE5yIkgn/BM+BdBWGwhn/BM+BdKGgoOY3Jld2Fp
X3ZlcnNpb24SCAoGMC42MS4wSh8KCXRvb2xfbmFtZRISChBnZXRfZmluYWxfYW5zd2VySg4KCGF0
dGVtcHRzEgIYAXoCGAGFAQABAAASnAEKEMTZ2IhpLz6J2hJhHBQ8/M4SCEuWz+vjzYifKhNUb29s
IFJlcGVhdGVkIFVzYWdlMAE5mAVhn/BM+BdBKOhhn/BM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoG
MC42MS4wSh8KCXRvb2xfbmFtZRISChBnZXRfZmluYWxfYW5zd2VySg4KCGF0dGVtcHRzEgIYAXoC
GAGFAQABAAASkAIKED8C+t95p855kLcXs5Nnt/sSCM4XAhL6u8O8Kg5UYXNrIEV4ZWN1dGlvbjAB
OdD8X57wTPgXQUgno5/wTPgXSi4KCGNyZXdfa2V5EiIKIGQ1NTExM2JlNGFhNDFiYTY0M2QzMjYw
NDJiMmYwM2YxSjEKB2NyZXdfaWQSJgokZjhhMDU5MDUtOTQ4YS00NDRhLTg2YmYtMmI1M2I0OTJi
ODIySi4KCHRhc2tfa2V5EiIKIDRhMzFiODUxMzNhM2EyOTRjNjg1M2RhNzU3ZDRiYWU3SjEKB3Rh
c2tfaWQSJgokOTc1ZDgwMjItMWJkMS00NjBlLTg2NmEtYjJmZGNiYjA4ZDliegIYAYUBAAEAABLO
CwoQFlnZCfbZ3Dj0L9TAE5LrLBIIoFr7BZErFNgqDENyZXcgQ3JlYXRlZDABOVhDDaDwTPgXQSg/
D6DwTPgXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuNjEuMEoaCg5weXRob25fdmVyc2lvbhIICgYz
LjExLjdKLgoIY3Jld19rZXkSIgogOTRjMzBkNmMzYjJhYzhmYjk0YjJkY2ZjNTcyZDBmNTlKMQoH
Y3Jld19pZBImCiQyMzM2MzRjNi1lNmQ2LTQ5ZTYtODhhZS1lYWUxYTM5YjBlMGZKHAoMY3Jld19w
cm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29m
X3Rhc2tzEgIYAkobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgCSv4ECgtjcmV3X2FnZW50cxLu
BArrBFt7ImtleSI6ICJlMTQ4ZTUzMjAyOTM0OTlmOGNlYmVhODI2ZTcyNTgyYiIsICJpZCI6ICI0
MjAzZjIyYi0wNWM3LTRiNjUtODBjMS1kM2Y0YmFlNzZhNDYiLCAicm9sZSI6ICJ0ZXN0IHJvbGUi
LCAidmVyYm9zZT8iOiB0cnVlLCAibWF4X2l0ZXIiOiAyLCAibWF4X3JwbSI6IDEwLCAiZnVuY3Rp
b25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVk
PyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGlt
aXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX0sIHsia2V5IjogImU3ZThlZWE4ODZiY2I4ZjEwNDVh
YmVlY2YxNDI1ZGI3IiwgImlkIjogImZjOTZjOTQ1LTY4ZDUtNDIxMy05NmNkLTNmYTAwNmUyZTYz
MCIsICJyb2xlIjogInRlc3Qgcm9sZTIiLCAidmVyYm9zZT8iOiB0cnVlLCAibWF4X2l0ZXIiOiAx
LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdw
dC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlv
bj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/QMK
CmNyZXdfdGFza3MS7gMK6wNbeyJrZXkiOiAiMzIyZGRhZTNiYzgwYzFkNDViODVmYTc3NTZkYjg2
NjUiLCAiaWQiOiAiOTVjYTg4NDItNmExMi00MGQ5LWIwZDItNGI0MzYxYmJlNTZkIiwgImFzeW5j
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6
ICJ0ZXN0IHJvbGUiLCAiYWdlbnRfa2V5IjogImUxNDhlNTMyMDI5MzQ5OWY4Y2ViZWE4MjZlNzI1
ODJiIiwgInRvb2xzX25hbWVzIjogW119LCB7ImtleSI6ICI1ZTljYTdkNjRiNDIwNWJiN2M0N2Uw
YjNmY2I1ZDIxZiIsICJpZCI6ICI5NzI5MTg2Yy1kN2JlLTRkYjQtYTk0ZS02OWU5OTk2NTI3MDAi
LCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2Vu
dF9yb2xlIjogInRlc3Qgcm9sZTIiLCAiYWdlbnRfa2V5IjogImU3ZThlZWE4ODZiY2I4ZjEwNDVh
YmVlY2YxNDI1ZGI3IiwgInRvb2xzX25hbWVzIjogWyJnZXRfZmluYWxfYW5zd2VyIl19XXoCGAGF
AQABAAASjgIKEC/YM2OukRrSg+ZAev4VhGESCOQ5RvzSS5IEKgxUYXNrIENyZWF0ZWQwATmQJx6g
8Ez4F0EgjR6g8Ez4F0ouCghjcmV3X2tleRIiCiA5NGMzMGQ2YzNiMmFjOGZiOTRiMmRjZmM1NzJk
MGY1OUoxCgdjcmV3X2lkEiYKJDIzMzYzNGM2LWU2ZDYtNDllNi04OGFlLWVhZTFhMzliMGUwZkou
Cgh0YXNrX2tleRIiCiAzMjJkZGFlM2JjODBjMWQ0NWI4NWZhNzc1NmRiODY2NUoxCgd0YXNrX2lk
EiYKJDk1Y2E4ODQyLTZhMTItNDBkOS1iMGQyLTRiNDM2MWJiZTU2ZHoCGAGFAQABAAASkAIKEHqZ
L8s3clXQyVTemNcTCcQSCA0tzK95agRQKg5UYXNrIEV4ZWN1dGlvbjABOQC8HqDwTPgXQdgNSqDw
TPgXSi4KCGNyZXdfa2V5EiIKIDk0YzMwZDZjM2IyYWM4ZmI5NGIyZGNmYzU3MmQwZjU5SjEKB2Ny
ZXdfaWQSJgokMjMzNjM0YzYtZTZkNi00OWU2LTg4YWUtZWFlMWEzOWIwZTBmSi4KCHRhc2tfa2V5
EiIKIDMyMmRkYWUzYmM4MGMxZDQ1Yjg1ZmE3NzU2ZGI4NjY1SjEKB3Rhc2tfaWQSJgokOTVjYTg4
NDItNmExMi00MGQ5LWIwZDItNGI0MzYxYmJlNTZkegIYAYUBAAEAABKOAgoQjhKzodMUmQ8NWtdy
Uj99whIIBsGtAymZibwqDFRhc2sgQ3JlYXRlZDABOXjVVaDwTPgXQXhSVqDwTPgXSi4KCGNyZXdf
a2V5EiIKIDk0YzMwZDZjM2IyYWM4ZmI5NGIyZGNmYzU3MmQwZjU5SjEKB2NyZXdfaWQSJgokMjMz
NjM0YzYtZTZkNi00OWU2LTg4YWUtZWFlMWEzOWIwZTBmSi4KCHRhc2tfa2V5EiIKIDVlOWNhN2Q2
NGI0MjA1YmI3YzQ3ZTBiM2ZjYjVkMjFmSjEKB3Rhc2tfaWQSJgokOTcyOTE4NmMtZDdiZS00ZGI0
LWE5NGUtNjllOTk5NjUyNzAwegIYAYUBAAEAABKTAQoQx5IUsjAFMGNUaz5MHy20OBIIzl2tr25P
LL8qClRvb2wgVXNhZ2UwATkgt5Sg8Ez4F0GwFpag8Ez4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYw
LjYxLjBKHwoJdG9vbF9uYW1lEhIKEGdldF9maW5hbF9hbnN3ZXJKDgoIYXR0ZW1wdHMSAhgBegIY
AYUBAAEAABKQAgoQEkfcfCrzTYIM6GQXhknlexIIa/oxeT78OL8qDlRhc2sgRXhlY3V0aW9uMAE5
WIFWoPBM+BdBuL/GoPBM+BdKLgoIY3Jld19rZXkSIgogOTRjMzBkNmMzYjJhYzhmYjk0YjJkY2Zj
NTcyZDBmNTlKMQoHY3Jld19pZBImCiQyMzM2MzRjNi1lNmQ2LTQ5ZTYtODhhZS1lYWUxYTM5YjBl
MGZKLgoIdGFza19rZXkSIgogNWU5Y2E3ZDY0YjQyMDViYjdjNDdlMGIzZmNiNWQyMWZKMQoHdGFz
a19pZBImCiQ5NzI5MTg2Yy1kN2JlLTRkYjQtYTk0ZS02OWU5OTk2NTI3MDB6AhgBhQEAAQAAEqwH
ChDrKBdEe+Z5276g9fgg6VzjEgiJfnDwsv1SrCoMQ3JldyBDcmVhdGVkMAE5MLQYofBM+BdBQFIa
ofBM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42MS4wShoKDnB5dGhvbl92ZXJzaW9uEggKBjMu
MTEuN0ouCghjcmV3X2tleRIiCiA3M2FhYzI4NWU2NzQ2NjY3Zjc1MTQ3NjcwMDAzNDExMEoxCgdj
cmV3X2lkEiYKJDg0NDY0YjhlLTRiZjctNDRiYy05MmUxLWE4ZDE1NGZlNWZkN0ocCgxjcmV3X3By
b2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRICEABKGgoUY3Jld19udW1iZXJfb2Zf
dGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxICGAFKyQIKC2NyZXdfYWdlbnRzErkC
CrYCW3sia2V5IjogImUxNDhlNTMyMDI5MzQ5OWY4Y2ViZWE4MjZlNzI1ODJiIiwgImlkIjogIjk4
YmIwNGYxLTBhZGMtNGZiNC04YzM2LWM3M2Q1MzQ1ZGRhZCIsICJyb2xlIjogInRlc3Qgcm9sZSIs
ICJ2ZXJib3NlPyI6IHRydWUsICJtYXhfaXRlciI6IDEsICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0
aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0iOiAiZ3B0LTRvIiwgImRlbGVnYXRpb25fZW5hYmxl
ZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xp
bWl0IjogMiwgInRvb2xzX25hbWVzIjogW119XUqQAgoKY3Jld190YXNrcxKBAgr+AVt7ImtleSI6
ICJmN2E5ZjdiYjFhZWU0YjZlZjJjNTI2ZDBhOGMyZjJhYyIsICJpZCI6ICIxZjRhYzJhYS03YmQ4
LTQ1NWQtODgyMC1jMzZmMjJjMDY4MzciLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVt
YW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogInRlc3Qgcm9sZSIsICJhZ2VudF9rZXki
OiAiZTE0OGU1MzIwMjkzNDk5ZjhjZWJlYTgyNmU3MjU4MmIiLCAidG9vbHNfbmFtZXMiOiBbImdl
dF9maW5hbF9hbnN3ZXIiXX1degIYAYUBAAEAABKOAgoQ0/vrakH7zD0uSvmVBUV8lxIIYe4YKcYG
hNgqDFRhc2sgQ3JlYXRlZDABOdBXKqHwTPgXQcCtKqHwTPgXSi4KCGNyZXdfa2V5EiIKIDczYWFj
Mjg1ZTY3NDY2NjdmNzUxNDc2NzAwMDM0MTEwSjEKB2NyZXdfaWQSJgokODQ0NjRiOGUtNGJmNy00
NGJjLTkyZTEtYThkMTU0ZmU1ZmQ3Si4KCHRhc2tfa2V5EiIKIGY3YTlmN2JiMWFlZTRiNmVmMmM1
MjZkMGE4YzJmMmFjSjEKB3Rhc2tfaWQSJgokMWY0YWMyYWEtN2JkOC00NTVkLTg4MjAtYzM2ZjIy
YzA2ODM3egIYAYUBAAEAABKkAQoQ5GDzHNlSdlcVDdxsI3abfRIIhYu8fZS3iA4qClRvb2wgVXNh
Z2UwATnIi2eh8Ez4F0FYbmih8Ez4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjYxLjBKHwoJdG9v
bF9uYW1lEhIKEGdldF9maW5hbF9hbnN3ZXJKDgoIYXR0ZW1wdHMSAhgBSg8KA2xsbRIICgZncHQt
NG96AhgBhQEAAQAAEpACChAy85Jfr/EEIe1THU8koXoYEgjlkNn7xfysjioOVGFzayBFeGVjdXRp
b24wATm42Cqh8Ez4F0GgxZah8Ez4F0ouCghjcmV3X2tleRIiCiA3M2FhYzI4NWU2NzQ2NjY3Zjc1
MTQ3NjcwMDAzNDExMEoxCgdjcmV3X2lkEiYKJDg0NDY0YjhlLTRiZjctNDRiYy05MmUxLWE4ZDE1
NGZlNWZkN0ouCgh0YXNrX2tleRIiCiBmN2E5ZjdiYjFhZWU0YjZlZjJjNTI2ZDBhOGMyZjJhY0ox
Cgd0YXNrX2lkEiYKJDFmNGFjMmFhLTdiZDgtNDU1ZC04ODIwLWMzNmYyMmMwNjgzN3oCGAGFAQAB
AAASrAcKEG0ZVq5Ww+/A0wOY3HmKgq4SCMe0ooxqjqBlKgxDcmV3IENyZWF0ZWQwATlwmISi8Ez4
F0HYUYai8Ez4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjYxLjBKGgoOcHl0aG9uX3ZlcnNpb24S
CAoGMy4xMS43Si4KCGNyZXdfa2V5EiIKIGQ1NTExM2JlNGFhNDFiYTY0M2QzMjYwNDJiMmYwM2Yx
SjEKB2NyZXdfaWQSJgokNzkyMWVlYmItMWI4NS00MzNjLWIxMDAtZDU4MmMyOTg5MzBkShwKDGNy
ZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJl
cl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrJAgoLY3Jld19hZ2Vu
dHMSuQIKtgJbeyJrZXkiOiAiZTE0OGU1MzIwMjkzNDk5ZjhjZWJlYTgyNmU3MjU4MmIiLCAiaWQi
OiAiZmRiZDI1MWYtYzUwOC00YmFhLTkwNjctN2U5YzQ2ZGZiZTJhIiwgInJvbGUiOiAidGVzdCBy
b2xlIiwgInZlcmJvc2U/IjogdHJ1ZSwgIm1heF9pdGVyIjogNiwgIm1heF9ycG0iOiBudWxsLCAi
ZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9l
bmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0
cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dSpACCgpjcmV3X3Rhc2tzEoECCv4BW3si
a2V5IjogIjRhMzFiODUxMzNhM2EyOTRjNjg1M2RhNzU3ZDRiYWU3IiwgImlkIjogIjA2YWFmM2Y1
LTE5ODctNDAxYS05Yzk0LWY3ZjM1YmQzMDg3OSIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2Us
ICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xlIiwgImFnZW50
X2tleSI6ICJlMTQ4ZTUzMjAyOTM0OTlmOGNlYmVhODI2ZTcyNTgyYiIsICJ0b29sc19uYW1lcyI6
IFsiZ2V0X2ZpbmFsX2Fuc3dlciJdfV16AhgBhQEAAQAAEo4CChDT+zPZHwfacDilkzaZJ9uGEgip
Kr5r62JB+ioMVGFzayBDcmVhdGVkMAE56KeTovBM+BdB8PmTovBM+BdKLgoIY3Jld19rZXkSIgog
ZDU1MTEzYmU0YWE0MWJhNjQzZDMyNjA0MmIyZjAzZjFKMQoHY3Jld19pZBImCiQ3OTIxZWViYi0x
Yjg1LTQzM2MtYjEwMC1kNTgyYzI5ODkzMGRKLgoIdGFza19rZXkSIgogNGEzMWI4NTEzM2EzYTI5
NGM2ODUzZGE3NTdkNGJhZTdKMQoHdGFza19pZBImCiQwNmFhZjNmNS0xOTg3LTQwMWEtOWM5NC1m
N2YzNWJkMzA4Nzl6AhgBhQEAAQAAEpMBChCl85ZcL2Fa0N5QTl6EsIfnEghyDo3bxT+AkyoKVG9v
bCBVc2FnZTABOVBA2aLwTPgXQYAy2qLwTPgXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuNjEuMEof
Cgl0b29sX25hbWUSEgoQZ2V0X2ZpbmFsX2Fuc3dlckoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAA
EpwBChB22uwKhaur9zmeoeEMaRKzEgjrtSEzMbRdIioTVG9vbCBSZXBlYXRlZCBVc2FnZTABOQga
C6PwTPgXQaDRC6PwTPgXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuNjEuMEofCgl0b29sX25hbWUS
EgoQZ2V0X2ZpbmFsX2Fuc3dlckoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAAEpMBChArAfcRpE+W
02oszyzccbaWEghTAO9J3zq/kyoKVG9vbCBVc2FnZTABORBRTqPwTPgXQegnT6PwTPgXShoKDmNy
ZXdhaV92ZXJzaW9uEggKBjAuNjEuMEofCgl0b29sX25hbWUSEgoQZ2V0X2ZpbmFsX2Fuc3dlckoO
CghhdHRlbXB0cxICGAF6AhgBhQEAAQAAEpwBChBdtM3p3aqT7wTGaXi6el/4Egie6lFQpa+AfioT
VG9vbCBSZXBlYXRlZCBVc2FnZTABOdBg2KPwTPgXQehW2aPwTPgXShoKDmNyZXdhaV92ZXJzaW9u
EggKBjAuNjEuMEofCgl0b29sX25hbWUSEgoQZ2V0X2ZpbmFsX2Fuc3dlckoOCghhdHRlbXB0cxIC
GAF6AhgBhQEAAQAAEpMBChDq4OuaUKkNoi6jlMyahPJpEgg1MFDHktBxNSoKVG9vbCBVc2FnZTAB
ORD/K6TwTPgXQZgMLaTwTPgXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuNjEuMEofCgl0b29sX25h
bWUSEgoQZ2V0X2ZpbmFsX2Fuc3dlckoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAAEpACChBhvTmu
QWP+bx9JMmGpt+w5Egh1J17yki7s8ioOVGFzayBFeGVjdXRpb24wATnoJJSi8Ez4F0HwNX6k8Ez4
F0ouCghjcmV3X2tleRIiCiBkNTUxMTNiZTRhYTQxYmE2NDNkMzI2MDQyYjJmMDNmMUoxCgdjcmV3
X2lkEiYKJDc5MjFlZWJiLTFiODUtNDMzYy1iMTAwLWQ1ODJjMjk4OTMwZEouCgh0YXNrX2tleRIi
CiA0YTMxYjg1MTMzYTNhMjk0YzY4NTNkYTc1N2Q0YmFlN0oxCgd0YXNrX2lkEiYKJDA2YWFmM2Y1
LTE5ODctNDAxYS05Yzk0LWY3ZjM1YmQzMDg3OXoCGAGFAQABAAASrg0KEOJZEqiJ7LTTX/J+tuLR
stQSCHKjy4tIcmKEKgxDcmV3IENyZWF0ZWQwATmIEuGk8Ez4F0FYDuOk8Ez4F0oaCg5jcmV3YWlf
dmVyc2lvbhIICgYwLjYxLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43Si4KCGNyZXdfa2V5
EiIKIDExMWI4NzJkOGYwY2Y3MDNmMmVmZWYwNGNmM2FjNzk4SjEKB2NyZXdfaWQSJgokYWFiYmU5
MmQtYjg3NC00NTZmLWE0NzAtM2FmMDc4ZTdjYThlShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50
aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGANKGwoVY3Jl
d19udW1iZXJfb2ZfYWdlbnRzEgIYAkqEBQoLY3Jld19hZ2VudHMS9AQK8QRbeyJrZXkiOiAiZTE0
OGU1MzIwMjkzNDk5ZjhjZWJlYTgyNmU3MjU4MmIiLCAiaWQiOiAiZmYzOTE0OGEtZWI2NS00Nzkx
LWI3MTMtM2Q4ZmE1YWQ5NTJlIiwgInJvbGUiOiAidGVzdCByb2xlIiwgInZlcmJvc2U/IjogZmFs
c2UsICJtYXhfaXRlciI6IDE1LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xs
bSI6ICIiLCAibGxtIjogImdwdC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJh
bGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29s
c19uYW1lcyI6IFtdfSwgeyJrZXkiOiAiZTdlOGVlYTg4NmJjYjhmMTA0NWFiZWVjZjE0MjVkYjci
LCAiaWQiOiAiYzYyNDJmNDMtNmQ2Mi00N2U4LTliYmMtNjM0ZDQwYWI4YTQ2IiwgInJvbGUiOiAi
dGVzdCByb2xlMiIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9ycG0i
OiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8iLCAiZGVs
ZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2Us
ICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBbXX1dStcFCgpjcmV3X3Rhc2tz
EsgFCsUFW3sia2V5IjogIjMyMmRkYWUzYmM4MGMxZDQ1Yjg1ZmE3NzU2ZGI4NjY1IiwgImlkIjog
IjRmZDZhZDdiLTFjNWMtNDE1ZC1hMWQ4LTgwYzExZGNjMTY4NiIsICJhc3luY19leGVjdXRpb24/
IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xl
IiwgImFnZW50X2tleSI6ICJlMTQ4ZTUzMjAyOTM0OTlmOGNlYmVhODI2ZTcyNTgyYiIsICJ0b29s
c19uYW1lcyI6IFtdfSwgeyJrZXkiOiAiY2M0ODc2ZjZlNTg4ZTcxMzQ5YmJkM2E2NTg4OGMzZTki
LCAiaWQiOiAiOTFlYWFhMWMtMWI4ZC00MDcxLTk2ZmQtM2QxZWVkMjhjMzZjIiwgImFzeW5jX2V4
ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJ0
ZXN0IHJvbGUiLCAiYWdlbnRfa2V5IjogImUxNDhlNTMyMDI5MzQ5OWY4Y2ViZWE4MjZlNzI1ODJi
IiwgInRvb2xzX25hbWVzIjogW119LCB7ImtleSI6ICJlMGIxM2UxMGQ3YTE0NmRjYzRjNDg4ZmNm
OGQ3NDhhMCIsICJpZCI6ICI4NjExZjhjZS1jNDVlLTQ2OTgtYWEyMS1jMGJkNzdhOGY2ZWYiLCAi
YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9y
b2xlIjogInRlc3Qgcm9sZTIiLCAiYWdlbnRfa2V5IjogImU3ZThlZWE4ODZiY2I4ZjEwNDVhYmVl
Y2YxNDI1ZGI3IiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEMbX6YsWK7RRf4L1
NBRKD6cSCFLJiNmspsyjKgxUYXNrIENyZWF0ZWQwATnonPGk8Ez4F0EotvKk8Ez4F0ouCghjcmV3
X2tleRIiCiAxMTFiODcyZDhmMGNmNzAzZjJlZmVmMDRjZjNhYzc5OEoxCgdjcmV3X2lkEiYKJGFh
YmJlOTJkLWI4NzQtNDU2Zi1hNDcwLTNhZjA3OGU3Y2E4ZUouCgh0YXNrX2tleRIiCiAzMjJkZGFl
M2JjODBjMWQ0NWI4NWZhNzc1NmRiODY2NUoxCgd0YXNrX2lkEiYKJDRmZDZhZDdiLTFjNWMtNDE1
ZC1hMWQ4LTgwYzExZGNjMTY4NnoCGAGFAQABAAASkAIKEM9JnUNanFbE9AtnSxqA7H8SCBWlG0WJ
sMgKKg5UYXNrIEV4ZWN1dGlvbjABOfDo8qTwTPgXQWhEH6XwTPgXSi4KCGNyZXdfa2V5EiIKIDEx
MWI4NzJkOGYwY2Y3MDNmMmVmZWYwNGNmM2FjNzk4SjEKB2NyZXdfaWQSJgokYWFiYmU5MmQtYjg3
NC00NTZmLWE0NzAtM2FmMDc4ZTdjYThlSi4KCHRhc2tfa2V5EiIKIDMyMmRkYWUzYmM4MGMxZDQ1
Yjg1ZmE3NzU2ZGI4NjY1SjEKB3Rhc2tfaWQSJgokNGZkNmFkN2ItMWM1Yy00MTVkLWExZDgtODBj
MTFkY2MxNjg2egIYAYUBAAEAABKOAgoQaQALCJNe5ByN4Wu7FE0kABIIYW/UfVfnYscqDFRhc2sg
Q3JlYXRlZDABOWhzLKXwTPgXQSD8LKXwTPgXSi4KCGNyZXdfa2V5EiIKIDExMWI4NzJkOGYwY2Y3
MDNmMmVmZWYwNGNmM2FjNzk4SjEKB2NyZXdfaWQSJgokYWFiYmU5MmQtYjg3NC00NTZmLWE0NzAt
M2FmMDc4ZTdjYThlSi4KCHRhc2tfa2V5EiIKIGNjNDg3NmY2ZTU4OGU3MTM0OWJiZDNhNjU4ODhj
M2U5SjEKB3Rhc2tfaWQSJgokOTFlYWFhMWMtMWI4ZC00MDcxLTk2ZmQtM2QxZWVkMjhjMzZjegIY
AYUBAAEAABKQAgoQpPfkgFlpIsR/eN2zn+x3MRIILoWF4/HvceAqDlRhc2sgRXhlY3V0aW9uMAE5
GCctpfBM+BdBQLNapfBM+BdKLgoIY3Jld19rZXkSIgogMTExYjg3MmQ4ZjBjZjcwM2YyZWZlZjA0
Y2YzYWM3OThKMQoHY3Jld19pZBImCiRhYWJiZTkyZC1iODc0LTQ1NmYtYTQ3MC0zYWYwNzhlN2Nh
OGVKLgoIdGFza19rZXkSIgogY2M0ODc2ZjZlNTg4ZTcxMzQ5YmJkM2E2NTg4OGMzZTlKMQoHdGFz
a19pZBImCiQ5MWVhYWExYy0xYjhkLTQwNzEtOTZmZC0zZDFlZWQyOGMzNmN6AhgBhQEAAQAAEo4C
ChCdvXmXZRltDxEwZx2XkhWhEghoKdomHHhLGSoMVGFzayBDcmVhdGVkMAE54HpmpfBM+BdB4Pdm
pfBM+BdKLgoIY3Jld19rZXkSIgogMTExYjg3MmQ4ZjBjZjcwM2YyZWZlZjA0Y2YzYWM3OThKMQoH
Y3Jld19pZBImCiRhYWJiZTkyZC1iODc0LTQ1NmYtYTQ3MC0zYWYwNzhlN2NhOGVKLgoIdGFza19r
ZXkSIgogZTBiMTNlMTBkN2ExNDZkY2M0YzQ4OGZjZjhkNzQ4YTBKMQoHdGFza19pZBImCiQ4NjEx
ZjhjZS1jNDVlLTQ2OTgtYWEyMS1jMGJkNzdhOGY2ZWZ6AhgBhQEAAQAAEpACChAIvs/XQL53haTt
NV8fk6geEgicgSOcpcYulyoOVGFzayBFeGVjdXRpb24wATnYImel8Ez4F0Gw5ZSl8Ez4F0ouCghj
cmV3X2tleRIiCiAxMTFiODcyZDhmMGNmNzAzZjJlZmVmMDRjZjNhYzc5OEoxCgdjcmV3X2lkEiYK
JGFhYmJlOTJkLWI4NzQtNDU2Zi1hNDcwLTNhZjA3OGU3Y2E4ZUouCgh0YXNrX2tleRIiCiBlMGIx
M2UxMGQ3YTE0NmRjYzRjNDg4ZmNmOGQ3NDhhMEoxCgd0YXNrX2lkEiYKJDg2MTFmOGNlLWM0NWUt
NDY5OC1hYTIxLWMwYmQ3N2E4ZjZlZnoCGAGFAQABAAASvAcKEARTPn0s+U/k8GclUc+5rRoSCHF3
KCh8OS0FKgxDcmV3IENyZWF0ZWQwATlo+Pul8Ez4F0EQ0f2l8Ez4F0oaCg5jcmV3YWlfdmVyc2lv
bhIICgYwLjYxLjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43Si4KCGNyZXdfa2V5EiIKIDQ5
NGYzNjU3MjM3YWQ4YTMwMzViMmYxYmVlY2RjNjc3SjEKB2NyZXdfaWQSJgokOWMwNzg3NWUtMTMz
Mi00MmMzLWFhZTEtZjNjMjc1YTQyNjYwShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEK
C2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1i
ZXJfb2ZfYWdlbnRzEgIYAUrbAgoLY3Jld19hZ2VudHMSywIKyAJbeyJrZXkiOiAiZTE0OGU1MzIw
MjkzNDk5ZjhjZWJlYTgyNmU3MjU4MmIiLCAiaWQiOiAiNGFkYzNmMmItN2IwNC00MDRlLWEwNDQt
N2JkNjVmYTMyZmE4IiwgInJvbGUiOiAidGVzdCByb2xlIiwgInZlcmJvc2U/IjogZmFsc2UsICJt
YXhfaXRlciI6IDE1LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIi
LCAibGxtIjogImdwdC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19j
b2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1l
cyI6IFsibGVhcm5fYWJvdXRfYWkiXX1dSo4CCgpjcmV3X3Rhc2tzEv8BCvwBW3sia2V5IjogImYy
NTk3Yzc4NjdmYmUzMjRkYzY1ZGMwOGRmZGJmYzZjIiwgImlkIjogIjg2YzZiODE2LTgyOWMtNDUx
Zi1iMDZkLTUyZjQ4YTdhZWJiMyIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9p
bnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAidGVzdCByb2xlIiwgImFnZW50X2tleSI6ICJl
MTQ4ZTUzMjAyOTM0OTlmOGNlYmVhODI2ZTcyNTgyYiIsICJ0b29sc19uYW1lcyI6IFsibGVhcm5f
YWJvdXRfYWkiXX1degIYAYUBAAEAABKOAgoQZWSU3+i71QSqlD8iiLdyWBII1Pawtza2ZHsqDFRh
c2sgQ3JlYXRlZDABOdj2FKbwTPgXQZhUFabwTPgXSi4KCGNyZXdfa2V5EiIKIDQ5NGYzNjU3MjM3
YWQ4YTMwMzViMmYxYmVlY2RjNjc3SjEKB2NyZXdfaWQSJgokOWMwNzg3NWUtMTMzMi00MmMzLWFh
ZTEtZjNjMjc1YTQyNjYwSi4KCHRhc2tfa2V5EiIKIGYyNTk3Yzc4NjdmYmUzMjRkYzY1ZGMwOGRm
ZGJmYzZjSjEKB3Rhc2tfaWQSJgokODZjNmI4MTYtODI5Yy00NTFmLWIwNmQtNTJmNDhhN2FlYmIz
egIYAYUBAAEAABKRAQoQl3nNMLhrOg+OgsWWX6A9LxIINbCKrQzQ3JkqClRvb2wgVXNhZ2UwATlA
TlCm8Ez4F0FASFGm8Ez4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjYxLjBKHQoJdG9vbF9uYW1l
EhAKDmxlYXJuX2Fib3V0X0FJSg4KCGF0dGVtcHRzEgIYAXoCGAGFAQABAAASkAIKEL9YI/QwoVBJ
1HBkTLyQxOESCCcKWhev/Dc8Kg5UYXNrIEV4ZWN1dGlvbjABOXiDFabwTPgXQcjEfqbwTPgXSi4K
CGNyZXdfa2V5EiIKIDQ5NGYzNjU3MjM3YWQ4YTMwMzViMmYxYmVlY2RjNjc3SjEKB2NyZXdfaWQS
JgokOWMwNzg3NWUtMTMzMi00MmMzLWFhZTEtZjNjMjc1YTQyNjYwSi4KCHRhc2tfa2V5EiIKIGYy
NTk3Yzc4NjdmYmUzMjRkYzY1ZGMwOGRmZGJmYzZjSjEKB3Rhc2tfaWQSJgokODZjNmI4MTYtODI5
Yy00NTFmLWIwNmQtNTJmNDhhN2FlYmIzegIYAYUBAAEAABLBBwoQ0Le1256mT8wmcvnuLKYeNRII
IYBlVsTs+qEqDENyZXcgQ3JlYXRlZDABOYCBiKrwTPgXQRBeiqrwTPgXShoKDmNyZXdhaV92ZXJz
aW9uEggKBjAuNjEuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKLgoIY3Jld19rZXkSIgog
NDk0ZjM2NTcyMzdhZDhhMzAzNWIyZjFiZWVjZGM2NzdKMQoHY3Jld19pZBImCiQyN2VlMGYyYy1h
ZjgwLTQxYWMtYjg3ZC0xNmViYWQyMTVhNTJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxK
EQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251
bWJlcl9vZl9hZ2VudHMSAhgBSuACCgtjcmV3X2FnZW50cxLQAgrNAlt7ImtleSI6ICJlMTQ4ZTUz
MjAyOTM0OTlmOGNlYmVhODI2ZTcyNTgyYiIsICJpZCI6ICJmMTYyMTFjNS00YWJlLTRhZDAtOWI0
YS0yN2RmMTJhODkyN2UiLCAicm9sZSI6ICJ0ZXN0IHJvbGUiLCAidmVyYm9zZT8iOiBmYWxzZSwg
Im1heF9pdGVyIjogMiwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAi
Z3B0LTRvIiwgImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAi
YWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9v
bHNfbmFtZXMiOiBbImxlYXJuX2Fib3V0X2FpIl19XUqOAgoKY3Jld190YXNrcxL/AQr8AVt7Imtl
eSI6ICJmMjU5N2M3ODY3ZmJlMzI0ZGM2NWRjMDhkZmRiZmM2YyIsICJpZCI6ICJjN2FiOWRiYi0y
MTc4LTRmOGItOGFiNi1kYTU1YzE0YTBkMGMiLCAiYXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAi
aHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9yb2xlIjogInRlc3Qgcm9sZSIsICJhZ2VudF9r
ZXkiOiAiZTE0OGU1MzIwMjkzNDk5ZjhjZWJlYTgyNmU3MjU4MmIiLCAidG9vbHNfbmFtZXMiOiBb
ImxlYXJuX2Fib3V0X2FpIl19XXoCGAGFAQABAAASjgIKECr4ueCUCo/tMB7EuBQt6TcSCD/UepYl
WGqAKgxUYXNrIENyZWF0ZWQwATk4kpyq8Ez4F0Hg85yq8Ez4F0ouCghjcmV3X2tleRIiCiA0OTRm
MzY1NzIzN2FkOGEzMDM1YjJmMWJlZWNkYzY3N0oxCgdjcmV3X2lkEiYKJDI3ZWUwZjJjLWFmODAt
NDFhYy1iODdkLTE2ZWJhZDIxNWE1MkouCgh0YXNrX2tleRIiCiBmMjU5N2M3ODY3ZmJlMzI0ZGM2
NWRjMDhkZmRiZmM2Y0oxCgd0YXNrX2lkEiYKJGM3YWI5ZGJiLTIxNzgtNGY4Yi04YWI2LWRhNTVj
MTRhMGQwY3oCGAGFAQABAAASeQoQkj0vmbCBIZPi33W9KrvrYhIIM2g73dOAN9QqEFRvb2wgVXNh
Z2UgRXJyb3IwATnQgsyr8Ez4F0GghM2r8Ez4F0oaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjYxLjBK
DwoDbGxtEggKBmdwdC00b3oCGAGFAQABAAASeQoQavr4/1SWr8x7HD5mAzlM0hIIXPx740Skkd0q
EFRvb2wgVXNhZ2UgRXJyb3IwATkouH9C8Uz4F0FQ1YBC8Uz4F0oaCg5jcmV3YWlfdmVyc2lvbhII
CgYwLjYxLjBKDwoDbGxtEggKBmdwdC00b3oCGAGFAQABAAASkAIKEIgmJ3QURJvSsEifMScSiUsS
CCyiPHcZT8AnKg5UYXNrIEV4ZWN1dGlvbjABOcAinarwTPgXQeBEynvxTPgXSi4KCGNyZXdfa2V5
EiIKIDQ5NGYzNjU3MjM3YWQ4YTMwMzViMmYxYmVlY2RjNjc3SjEKB2NyZXdfaWQSJgokMjdlZTBm
MmMtYWY4MC00MWFjLWI4N2QtMTZlYmFkMjE1YTUySi4KCHRhc2tfa2V5EiIKIGYyNTk3Yzc4Njdm
YmUzMjRkYzY1ZGMwOGRmZGJmYzZjSjEKB3Rhc2tfaWQSJgokYzdhYjlkYmItMjE3OC00ZjhiLThh
YjYtZGE1NWMxNGEwZDBjegIYAYUBAAEAABLEBwoQY+GZuYkP6mwdaVQQc11YuhII7ADKOlFZlzQq
DENyZXcgQ3JlYXRlZDABObCoi3zxTPgXQeCUjXzxTPgXShoKDmNyZXdhaV92ZXJzaW9uEggKBjAu
NjEuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKLgoIY3Jld19rZXkSIgogN2U2NjA4OTg5
ODU5YTY3ZWVjODhlZWY3ZmNlODUyMjVKMQoHY3Jld19pZBImCiQxMmE0OTFlNS00NDgwLTQ0MTYt
OTAxYi1iMmI1N2U1ZWU4ZThKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19t
ZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9h
Z2VudHMSAhgBSt8CCgtjcmV3X2FnZW50cxLPAgrMAlt7ImtleSI6ICIyMmFjZDYxMWU0NGVmNWZh
YzA1YjUzM2Q3NWU4ODkzYiIsICJpZCI6ICI5NjljZjhlMy0yZWEwLTQ5ZjgtODNlMS02MzEzYmE4
ODc1ZjUiLCAicm9sZSI6ICJEYXRhIFNjaWVudGlzdCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4
X2l0ZXIiOiAxNSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwg
ImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29k
ZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMi
OiBbImdldCBncmVldGluZ3MiXX1dSpICCgpjcmV3X3Rhc2tzEoMCCoACW3sia2V5IjogImEyNzdi
MzRiMmMxNDZmMGM1NmM1ZTEzNTZlOGY4YTU3IiwgImlkIjogImIwMTg0NTI2LTJlOWItNDA0My1h
M2JiLTFiM2QzNWIxNTNhOCIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1
dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiRGF0YSBTY2llbnRpc3QiLCAiYWdlbnRfa2V5Ijog
IjIyYWNkNjExZTQ0ZWY1ZmFjMDViNTMzZDc1ZTg4OTNiIiwgInRvb2xzX25hbWVzIjogWyJnZXQg
Z3JlZXRpbmdzIl19XXoCGAGFAQABAAASjgIKEI/rrKkPz08VpVWNehfvxJ0SCIpeq76twGj3KgxU
YXNrIENyZWF0ZWQwATlA9aR88Uz4F0HoVqV88Uz4F0ouCghjcmV3X2tleRIiCiA3ZTY2MDg5ODk4
NTlhNjdlZWM4OGVlZjdmY2U4NTIyNUoxCgdjcmV3X2lkEiYKJDEyYTQ5MWU1LTQ0ODAtNDQxNi05
MDFiLWIyYjU3ZTVlZThlOEouCgh0YXNrX2tleRIiCiBhMjc3YjM0YjJjMTQ2ZjBjNTZjNWUxMzU2
ZThmOGE1N0oxCgd0YXNrX2lkEiYKJGIwMTg0NTI2LTJlOWItNDA0My1hM2JiLTFiM2QzNWIxNTNh
OHoCGAGFAQABAAASkAEKEKKr5LR8SkqfqqktFhniLdkSCPMnqI2ma9UoKgpUb29sIFVzYWdlMAE5
sCHgfPFM+BdB+A/hfPFM+BdKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC42MS4wShwKCXRvb2xfbmFt
ZRIPCg1HZXQgR3JlZXRpbmdzSg4KCGF0dGVtcHRzEgIYAXoCGAGFAQABAAASkAIKEOj2bALdBlz6
1kP1MvHE5T0SCLw4D7D331IOKg5UYXNrIEV4ZWN1dGlvbjABOeCBpXzxTPgXQSjiEH3xTPgXSi4K
CGNyZXdfa2V5EiIKIDdlNjYwODk4OTg1OWE2N2VlYzg4ZWVmN2ZjZTg1MjI1SjEKB2NyZXdfaWQS
JgokMTJhNDkxZTUtNDQ4MC00NDE2LTkwMWItYjJiNTdlNWVlOGU4Si4KCHRhc2tfa2V5EiIKIGEy
NzdiMzRiMmMxNDZmMGM1NmM1ZTEzNTZlOGY4YTU3SjEKB3Rhc2tfaWQSJgokYjAxODQ1MjYtMmU5
Yi00MDQzLWEzYmItMWIzZDM1YjE1M2E4egIYAYUBAAEAABLQBwoQLjz7NWyGPgGU4tVFJ0sh9BII
N6EzU5f/sykqDENyZXcgQ3JlYXRlZDABOajOcX3xTPgXQUCAc33xTPgXShoKDmNyZXdhaV92ZXJz
aW9uEggKBjAuNjEuMEoaCg5weXRob25fdmVyc2lvbhIICgYzLjExLjdKLgoIY3Jld19rZXkSIgog
YzMwNzYwMDkzMjY3NjE0NDRkNTdjNzFkMWRhM2YyN2NKMQoHY3Jld19pZBImCiQ1N2Y0NjVhNC03
Zjk1LTQ5Y2MtODNmZC0zZTIwNWRhZDBjZTJKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxK
EQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251
bWJlcl9vZl9hZ2VudHMSAhgBSuUCCgtjcmV3X2FnZW50cxLVAgrSAlt7ImtleSI6ICI5OGYzYjFk
NDdjZTk2OWNmMDU3NzI3Yjc4NDE0MjVjZCIsICJpZCI6ICJjZjcyZDlkNy01MjQwLTRkMzEtYjA2
Mi0xMmNjMDU2OGNjM2MiLCAicm9sZSI6ICJGcmllbmRseSBOZWlnaGJvciIsICJ2ZXJib3NlPyI6
IGZhbHNlLCAibWF4X2l0ZXIiOiAxNSwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGlu
Z19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNl
LCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAi
dG9vbHNfbmFtZXMiOiBbImRlY2lkZSBncmVldGluZ3MiXX1dSpgCCgpjcmV3X3Rhc2tzEokCCoYC
W3sia2V5IjogIjgwZDdiY2Q0OTA5OTI5MDA4MzgzMmYwZTk4MzM4MGRmIiwgImlkIjogIjUxNTJk
MmQ2LWYwODYtNGIyMi1hOGMxLTMyODA5NzU1NjZhZCIsICJhc3luY19leGVjdXRpb24/IjogZmFs
c2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiRnJpZW5kbHkgTmVpZ2hi
b3IiLCAiYWdlbnRfa2V5IjogIjk4ZjNiMWQ0N2NlOTY5Y2YwNTc3MjdiNzg0MTQyNWNkIiwgInRv
b2xzX25hbWVzIjogWyJkZWNpZGUgZ3JlZXRpbmdzIl19XXoCGAGFAQABAAASjgIKEM+95r2LzVVg
kqAMolHjl9oSCN9WyhdF/ucVKgxUYXNrIENyZWF0ZWQwATnoCoJ98Uz4F0HwXIJ98Uz4F0ouCghj
cmV3X2tleRIiCiBjMzA3NjAwOTMyNjc2MTQ0NGQ1N2M3MWQxZGEzZjI3Y0oxCgdjcmV3X2lkEiYK
JDU3ZjQ2NWE0LTdmOTUtNDljYy04M2ZkLTNlMjA1ZGFkMGNlMkouCgh0YXNrX2tleRIiCiA4MGQ3
YmNkNDkwOTkyOTAwODM4MzJmMGU5ODMzODBkZkoxCgd0YXNrX2lkEiYKJDUxNTJkMmQ2LWYwODYt
NGIyMi1hOGMxLTMyODA5NzU1NjZhZHoCGAGFAQABAAASkwEKENJjTKn4eTP/P11ERMIGcdYSCIKF
bGEmcS7bKgpUb29sIFVzYWdlMAE5EFu5ffFM+BdBoD26ffFM+BdKGgoOY3Jld2FpX3ZlcnNpb24S
CAoGMC42MS4wSh8KCXRvb2xfbmFtZRISChBEZWNpZGUgR3JlZXRpbmdzSg4KCGF0dGVtcHRzEgIY
AXoCGAGFAQABAAASkAIKEG29htC06tLF7ihE5Yz6NyMSCAAsKzOcj25nKg5UYXNrIEV4ZWN1dGlv
bjABOQCEgn3xTPgXQfgg7X3xTPgXSi4KCGNyZXdfa2V5EiIKIGMzMDc2MDA5MzI2NzYxNDQ0ZDU3
YzcxZDFkYTNmMjdjSjEKB2NyZXdfaWQSJgokNTdmNDY1YTQtN2Y5NS00OWNjLTgzZmQtM2UyMDVk
YWQwY2UySi4KCHRhc2tfa2V5EiIKIDgwZDdiY2Q0OTA5OTI5MDA4MzgzMmYwZTk4MzM4MGRmSjEK
B3Rhc2tfaWQSJgokNTE1MmQyZDYtZjA4Ni00YjIyLWE4YzEtMzI4MDk3NTU2NmFkegIYAYUBAAEA
AA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '18925'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Tue, 24 Sep 2024 21:57:39 GMT
status:
code: 200
message: OK
- request:
body: '{"model": "gemma2:latest", "prompt": "### User:\nRespond in 20 words. Who
are you?\n\n", "options": {}, "stream": false}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '120'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: http://localhost:8080/api/generate
response:
body:
string: '{"model":"gemma2:latest","created_at":"2024-09-24T21:57:51.284303Z","response":"I
am Gemma, an open-weights AI assistant developed by Google DeepMind. \n","done":true,"done_reason":"stop","context":[106,1645,108,6176,4926,235292,108,54657,575,235248,235284,235276,3907,235265,7702,708,692,235336,109,107,108,106,2516,108,235285,1144,137061,235269,671,2174,235290,30316,16481,20409,6990,731,6238,20555,35777,235265,139,108],"total_duration":14046647083,"load_duration":12942541833,"prompt_eval_count":25,"prompt_eval_duration":177695000,"eval_count":19,"eval_duration":923120000}'
headers:
Content-Length:
- '579'
Content-Type:
- application/json; charset=utf-8
Date:
- Tue, 24 Sep 2024 21:57:51 GMT
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,36 @@
interactions:
- request:
body: '{"model": "llama3.2:3b", "prompt": "### User:\nRespond in 20 words. Who
which model are you?\n\n", "options": {"stop": ["\nObservation:"]}, "stream":
false}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '156'
Content-Type:
- application/json
User-Agent:
- python-requests/2.32.3
method: POST
uri: http://localhost:11434/api/generate
response:
body:
string: '{"model":"llama3.2:3b","created_at":"2025-01-02T20:07:07.623404Z","response":"I''m
an AI designed to assist and communicate with users, utilizing a combination
of natural language processing models.","done":true,"done_reason":"stop","context":[128006,9125,128007,271,38766,1303,33025,2696,25,6790,220,2366,18,271,128009,128006,882,128007,271,14711,2724,512,66454,304,220,508,4339,13,10699,902,1646,527,499,1980,128009,128006,78191,128007,271,40,2846,459,15592,6319,311,7945,323,19570,449,3932,11,35988,264,10824,315,5933,4221,8863,4211,13],"total_duration":1076617833,"load_duration":46505416,"prompt_eval_count":40,"prompt_eval_duration":626000000,"eval_count":22,"eval_duration":399000000}'
headers:
Content-Length:
- '690'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Jan 2025 20:07:07 GMT
status:
code: 200
message: OK
version: 1
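For reference, the interaction this cassette captures is a plain HTTP POST to Ollama's `/api/generate` endpoint. A minimal sketch using `requests` (the client indicated by the recorded `User-Agent`), with a simplified prompt standing in for the recorded one:

```python
import requests

# Field names mirror the recorded request body above; the prompt is simplified.
payload = {
    "model": "llama3.2:3b",
    "prompt": "### User:\nRespond in 20 words. Which model are you?\n\n",
    "options": {"stop": ["\nObservation:"]},
    "stream": False,
}
resp = requests.post("http://localhost:11434/api/generate", json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["response"])  # the completion text returned by the model
```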

View File

@@ -0,0 +1,988 @@
interactions:
- request:
body: !!binary |
CpotCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS8SwKEgoQY3Jld2FpLnRl
bGVtZXRyeRLrCQoQmqG4kmRspGSV9KSDE2WH2hIInKDQhtLNgqEqDENyZXcgQ3JlYXRlZDABOeCb
nCGokxcYQYDspiGokxcYShoKDmNyZXdhaV92ZXJzaW9uEggKBjAuOTUuMEoaCg5weXRob25fdmVy
c2lvbhIICgYzLjExLjdKLgoIY3Jld19rZXkSIgogY2FhMWFlYjNkZDQzNjM4NjU2OGE1YzNmZTIx
MDFhZjVKMQoHY3Jld19pZBImCiQxOWRmM2Y3MS1kYzk0LTQ0ZjYtYmY0Zi0zNjBjZjY2YjJiYWZK
HAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNyZXdf
bnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgCSo4FCgtjcmV3
X2FnZW50cxL+BAr7BFt7ImtleSI6ICI5N2Y0MTdmM2UxZTMxY2YwYzEwOWY3NTI5YWM4ZjZiYyIs
ICJpZCI6ICJjMzIyZGMzMS0zZDNlLTRlOTctYjgwNi02MDU3ZTZjNGQxZmUiLCAicm9sZSI6ICJQ
cm9ncmFtbWVyIiwgInZlcmJvc2U/IjogZmFsc2UsICJtYXhfaXRlciI6IDIwLCAibWF4X3JwbSI6
IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdwdC00by1taW5pIiwg
ImRlbGVnYXRpb25fZW5hYmxlZD8iOiB0cnVlLCAiYWxsb3dfY29kZV9leGVjdXRpb24/IjogdHJ1
ZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfSwgeyJrZXkiOiAiOTJh
MjRiMGJjY2ZiMGRjMGU0MzlkN2Q1OWJhOWY2ZjMiLCAiaWQiOiAiYzMzMGJlNDAtYWQxMS00YjM2
LWEwYTYtY2E4NWY5ZWFjYzZhIiwgInJvbGUiOiAiQ29kZSBSZXZpZXdlciIsICJ2ZXJib3NlPyI6
IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGlu
Z19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/Ijog
dHJ1ZSwgImFsbG93X2NvZGVfZXhlY3V0aW9uPyI6IHRydWUsICJtYXhfcmV0cnlfbGltaXQiOiAy
LCAidG9vbHNfbmFtZXMiOiBbXX1dSooCCgpjcmV3X3Rhc2tzEvsBCvgBW3sia2V5IjogIjc5YWEy
N2RmNzRlNjI3OWUzNGE4ODg4MTc0ODFjNDBmIiwgImlkIjogIjEyYmNjNTAwLWExNzgtNGQyZS05
NmQ4LWNkN2UwZmYzNzRhMCIsICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1
dD8iOiBmYWxzZSwgImFnZW50X3JvbGUiOiAiUHJvZ3JhbW1lciIsICJhZ2VudF9rZXkiOiAiOTdm
NDE3ZjNlMWUzMWNmMGMxMDlmNzUyOWFjOGY2YmMiLCAidG9vbHNfbmFtZXMiOiBbInRlc3QgdG9v
bCJdfV16AhgBhQEAAQAAErMHChCxSjXt2/kv7CqAN8F+6ZMMEghR4jnKP0dHjSoMQ3JldyBDcmVh
dGVkMAE5iBNAIqiTFxhBiGZHIqiTFxhKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC45NS4wShoKDnB5
dGhvbl92ZXJzaW9uEggKBjMuMTEuN0ouCghjcmV3X2tleRIiCiA3NzNhODc2YjU3OTJkYjY5NTU5
ZmU4MmMzYWQyMzU5ZkoxCgdjcmV3X2lkEiYKJDk2YjRkMmFlLTQ3ZDUtNDA0MS1hNjJhLTAyMmMy
ZDUzZGZkZkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21lbW9yeRICEABK
GgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2FnZW50cxICGAFK
2QIKC2NyZXdfYWdlbnRzEskCCsYCW3sia2V5IjogIjA3N2M3YTg2N2UyMGQwYTY4Yjk3NGU0NzYw
NzEwOWYzIiwgImlkIjogIjVhOTJiYzM4LWFlNGEtNGViZC1iNTM2LTFkZGVjZDBkODBhYyIsICJy
b2xlIjogIk11bHRpbW9kYWwgQW5hbHlzdCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIi
OiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6
ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2Rl
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6
IFtdfV1KhwIKCmNyZXdfdGFza3MS+AEK9QFbeyJrZXkiOiAiYzc1M2M2ODA2MzU5NDM2YTU4OTZm
ZWMwOWJhYTEyNWUiLCAiaWQiOiAiNmRhZTcyNzktMDhjNS00OGNiLWI5OWItYmUyYjAwMzhkYzgz
IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdl
bnRfcm9sZSI6ICJNdWx0aW1vZGFsIEFuYWx5c3QiLCAiYWdlbnRfa2V5IjogIjA3N2M3YTg2N2Uy
MGQwYTY4Yjk3NGU0NzYwNzEwOWYzIiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASqQcK
EIW4ljcZA7v+rs1zMkO4T0wSCIcyNxRlQUYoKgxDcmV3IENyZWF0ZWQwATngxKQiqJMXGEHIIasi
qJMXGEoaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjk1LjBKGgoOcHl0aG9uX3ZlcnNpb24SCAoGMy4x
MS43Si4KCGNyZXdfa2V5EiIKIGNkNGRhNjRlNmRjM2I5ZWJkY2EyNDQ0YzFkNzMwMjgxSjEKB2Ny
ZXdfaWQSJgokMDY0ZDJmMmYtYWEzMy00MmU4LTgyYjAtMjc1YzM4MzY0MjU0ShwKDGNyZXdfcHJv
Y2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3X251bWJlcl9vZl90
YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrUAgoLY3Jld19hZ2VudHMSxAIK
wQJbeyJrZXkiOiAiZDg1MTA2NGI5YjQ4NDE4YWMyNWY4ZDM3YzdlMzJiYjYiLCAiaWQiOiAiY2M4
OWQ4YTAtYjk5Yy00MDNkLTg1ODYtNjgzZDA1MGVjMjlhIiwgInJvbGUiOiAiSW1hZ2UgQW5hbHlz
dCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAi
ZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8tbWluaSIsICJkZWxlZ2F0
aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1h
eF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1KggIKCmNyZXdfdGFza3MS8wEK
8AFbeyJrZXkiOiAiZWU4NzI5Njk0MTBjOTRjMzM0ZjljZmZhMGE0MTVmZWMiLCAiaWQiOiAiNDY3
ZmVlNDktZDkzMi00Nzg1LWI1M2QtYTdkNWQxOTk3NzNmIiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBm
YWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6ICJJbWFnZSBBbmFseXN0
IiwgImFnZW50X2tleSI6ICJkODUxMDY0YjliNDg0MThhYzI1ZjhkMzdjN2UzMmJiNiIsICJ0b29s
c19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEqMHChD9ptX+M+ebjYJvJRIgLS+sEgi86MlIS3PYaCoM
Q3JldyBDcmVhdGVkMAE5MGUTI6iTFxhBqKoZI6iTFxhKGgoOY3Jld2FpX3ZlcnNpb24SCAoGMC45
NS4wShoKDnB5dGhvbl92ZXJzaW9uEggKBjMuMTEuN0ouCghjcmV3X2tleRIiCiBlMzk1NjdiNTA1
MjkwOWNhMzM0MDk4NGI4Mzg5ODBlYUoxCgdjcmV3X2lkEiYKJGQwM2I0NDRiLTBmMjAtNGY5Ni1i
MjA0LWQ3YzQ4MzYyNGM0YkocCgxjcmV3X3Byb2Nlc3MSDAoKc2VxdWVudGlhbEoRCgtjcmV3X21l
bW9yeRICEABKGgoUY3Jld19udW1iZXJfb2ZfdGFza3MSAhgBShsKFWNyZXdfbnVtYmVyX29mX2Fn
ZW50cxICGAFKzgIKC2NyZXdfYWdlbnRzEr4CCrsCW3sia2V5IjogIjlkYzhjY2UwMzA0NjgxOTYw
NDFiNGMzODBiNjE3Y2IwIiwgImlkIjogImM4Mjc0MmM1LWIzZjQtNDJkMC1iYjNmLTRkZWM4Y2Q4
MDNmNCIsICJyb2xlIjogIkltYWdlIEFuYWx5c3QiLCAidmVyYm9zZT8iOiB0cnVlLCAibWF4X2l0
ZXIiOiAyMCwgIm1heF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxs
bSI6ICJncHQtNG8iLCAiZGVsZWdhdGlvbl9lbmFibGVkPyI6IGZhbHNlLCAiYWxsb3dfY29kZV9l
eGVjdXRpb24/IjogZmFsc2UsICJtYXhfcmV0cnlfbGltaXQiOiAyLCAidG9vbHNfbmFtZXMiOiBb
XX1dSoICCgpjcmV3X3Rhc2tzEvMBCvABW3sia2V5IjogImE5YTc2Y2E2OTU3ZDBiZmZhNjllYWIy
MGI2NjQ4MjJiIiwgImlkIjogImU4ZDFmNWM0LWJhNDEtNGQyNy1iMGZmLWU3MmNiNDA0MWJhMyIs
ICJhc3luY19leGVjdXRpb24/IjogZmFsc2UsICJodW1hbl9pbnB1dD8iOiBmYWxzZSwgImFnZW50
X3JvbGUiOiAiSW1hZ2UgQW5hbHlzdCIsICJhZ2VudF9rZXkiOiAiOWRjOGNjZTAzMDQ2ODE5NjA0
MWI0YzM4MGI2MTdjYjAiLCAidG9vbHNfbmFtZXMiOiBbXX1degIYAYUBAAEAABKOAgoQEQqgiftV
3giK4F9VtKBNSBIIVzb/bxKe7icqDFRhc2sgQ3JlYXRlZDABOejyJyOokxcYQdhIKCOokxcYSi4K
CGNyZXdfa2V5EiIKIGUzOTU2N2I1MDUyOTA5Y2EzMzQwOTg0YjgzODk4MGVhSjEKB2NyZXdfaWQS
JgokZDAzYjQ0NGItMGYyMC00Zjk2LWIyMDQtZDdjNDgzNjI0YzRiSi4KCHRhc2tfa2V5EiIKIGE5
YTc2Y2E2OTU3ZDBiZmZhNjllYWIyMGI2NjQ4MjJiSjEKB3Rhc2tfaWQSJgokZThkMWY1YzQtYmE0
MS00ZDI3LWIwZmYtZTcyY2I0MDQxYmEzegIYAYUBAAEAABKXAQoQg/ksOtq7LbOO50GnDSOHQBII
YX08fxOToKwqClRvb2wgVXNhZ2UwATlI/lskqJMXGEEAY2IkqJMXGEoaCg5jcmV3YWlfdmVyc2lv
bhIICgYwLjk1LjBKIwoJdG9vbF9uYW1lEhYKFEFkZCBpbWFnZSB0byBjb250ZW50Sg4KCGF0dGVt
cHRzEgIYAXoCGAGFAQABAAASqAcKEEmW3y/PMPhkfMJ/43EA4SASCHMJp4PEDhFLKgxDcmV3IENy
ZWF0ZWQwATkAuLYlqJMXGEHAaL4lqJMXGEoaCg5jcmV3YWlfdmVyc2lvbhIICgYwLjk1LjBKGgoO
cHl0aG9uX3ZlcnNpb24SCAoGMy4xMS43Si4KCGNyZXdfa2V5EiIKIDAwYjk0NmJlNDQzNzE0YjNh
NDdjMjAxMDFlYjAyZDY2SjEKB2NyZXdfaWQSJgokNzJkZTEwZTQtNDkwZC00NDYwLTk1NzMtMmU5
ZmM5YTMwMWE1ShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQ
AEoaChRjcmV3X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIY
AUrTAgoLY3Jld19hZ2VudHMSwwIKwAJbeyJrZXkiOiAiNGI4YTdiODQwZjk0YmY3ODE4YjVkNTNm
Njg5MjdmZDUiLCAiaWQiOiAiN2IyMGMyODMtNGFiNy00MjFlLTgzM2QtOWE5N2UzNjFjM2Q2Iiwg
InJvbGUiOiAiUmVwb3J0IFdyaXRlciIsICJ2ZXJib3NlPyI6IHRydWUsICJtYXhfaXRlciI6IDIw
LCAibWF4X3JwbSI6IG51bGwsICJmdW5jdGlvbl9jYWxsaW5nX2xsbSI6ICIiLCAibGxtIjogImdw
dC00by1taW5pIiwgImRlbGVnYXRpb25fZW5hYmxlZD8iOiBmYWxzZSwgImFsbG93X2NvZGVfZXhl
Y3V0aW9uPyI6IGZhbHNlLCAibWF4X3JldHJ5X2xpbWl0IjogMiwgInRvb2xzX25hbWVzIjogW119
XUqCAgoKY3Jld190YXNrcxLzAQrwAVt7ImtleSI6ICJiNzEzYzgyZmViOTJjOWY1YzU4YjQwYTk3
NTU2YjdhYyIsICJpZCI6ICJhZjFhOTYxOC05MjRhLTRlNzktYjZlYi01OGRhMTM2OTU5YzUiLCAi
YXN5bmNfZXhlY3V0aW9uPyI6IGZhbHNlLCAiaHVtYW5faW5wdXQ/IjogZmFsc2UsICJhZ2VudF9y
b2xlIjogIlJlcG9ydCBXcml0ZXIiLCAiYWdlbnRfa2V5IjogIjRiOGE3Yjg0MGY5NGJmNzgxOGI1
ZDUzZjY4OTI3ZmQ1IiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQABAAASjgIKEIWRa5ZrcXnJ
3rJdzzJ56j8SCKr45vrXkeyTKgxUYXNrIENyZWF0ZWQwATn488glqJMXGEHoScklqJMXGEouCghj
cmV3X2tleRIiCiAwMGI5NDZiZTQ0MzcxNGIzYTQ3YzIwMTAxZWIwMmQ2NkoxCgdjcmV3X2lkEiYK
JDcyZGUxMGU0LTQ5MGQtNDQ2MC05NTczLTJlOWZjOWEzMDFhNUouCgh0YXNrX2tleRIiCiBiNzEz
YzgyZmViOTJjOWY1YzU4YjQwYTk3NTU2YjdhY0oxCgd0YXNrX2lkEiYKJGFmMWE5NjE4LTkyNGEt
NGU3OS1iNmViLTU4ZGExMzY5NTljNXoCGAGFAQABAAA=
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '5789'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 04 Jan 2025 19:22:17 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Report Writer. You''re
an expert at writing structured reports.\nYour personal goal is: Create properly
formatted reports\nTo give my best complete final answer to the task use the
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
Your final answer must be the great and the most complete as possible, it must
be outcome described.\n\nI MUST use these formats, my job depends on it!"},
{"role": "user", "content": "\nCurrent Task: Write a report about AI with exactly
3 key points.\n\nThis is the expect criteria for your final answer: A properly
formatted report\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}], "model":
"gpt-4o-mini", "stop": ["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '934'
content-type:
- application/json
cookie:
- _cfuvid=v_wJZ5m7qCjrnRfks0gT2GAk9yR14BdIDAQiQR7xxI8-1735266215000-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-Am40qBAFJtuaFsOlTsBHFCoYUvLhN\",\n \"object\":
\"chat.completion\",\n \"created\": 1736018532,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer. \\nFinal
Answer: \\n\\n# Report on Artificial Intelligence (AI)\\n\\n## Introduction\\nArtificial
Intelligence (AI) is a rapidly evolving technology that simulates human intelligence
processes by machines, particularly computer systems. AI has a profound impact
on various sectors, enhancing efficiency, improving decision-making, and leading
to groundbreaking innovations. This report highlights three key points regarding
the significance and implications of AI technology.\\n\\n## Key Point 1: Transformative
Potential in Various Industries\\nAI's transformative potential is evident across
multiple industries, including healthcare, finance, transportation, and agriculture.
In healthcare, AI algorithms can analyze complex medical data, leading to improved
diagnostics, personalized medicine, and predictive analytics, thereby enhancing
patient outcomes. The financial sector employs AI for risk management, fraud
detection, and automated trading, which increases operational efficiency and
minimizes human error. In transportation, AI is integral to the development
of autonomous vehicles and smart traffic systems, optimizing routes and reducing
congestion. Furthermore, agriculture benefits from AI applications through precision
farming, which maximizes yield while minimizing environmental impact.\\n\\n##
Key Point 2: Ethical Considerations and Challenges\\nAs AI technologies become
more pervasive, ethical considerations arise regarding their implementation
and use. Concerns include data privacy, algorithmic bias, and the displacement
of jobs due to automation. Ensuring that AI systems are transparent, fair, and
accountable is crucial in addressing these issues. Organizations must develop
comprehensive guidelines and regulatory frameworks to mitigate bias in AI algorithms
and protect user data. Moreover, addressing the social implications of AI, such
as potential job displacement, is essential, necessitating investment in workforce
retraining and education to prepare for an AI-driven economy.\\n\\n## Key Point
3: Future Directions and Developments\\nLooking ahead, the future of AI promises
continued advancements and integration into everyday life. Emerging trends include
the development of explainable AI (XAI), enhancing interpretability and understanding
of AI decision-making processes. Advances in natural language processing (NLP)
will facilitate better human-computer interactions, allowing for more intuitive
applications. Additionally, as AI technology becomes increasingly sophisticated,
its role in addressing global challenges, such as climate change and healthcare
disparities, is expected to expand. Stakeholders must collaborate to ensure
that these developments align with ethical standards and societal needs, fostering
a responsible AI future.\\n\\n## Conclusion\\nArtificial Intelligence stands
at the forefront of technological innovation, with the potential to revolutionize
industries and address complex global challenges. However, it is imperative
to navigate the ethical considerations and challenges it poses. By fostering
responsible AI development, we can harness its transformative power while ensuring
equitability and transparency for future generations.\",\n \"refusal\":
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 170,\n \"completion_tokens\":
524,\n \"total_tokens\": 694,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_0aa8d3e20b\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fcd9890790e0133-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 04 Jan 2025 19:22:19 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=pumYGlf1gsbVoFNTM1vh9Okj41SgxP3y65T5YWWPU1U-1736018539-1.0.1.1-wmaotkWMviN4lKh6M3P04A8p61Ehm.rTehDpsJhxYhNBNU5.kznMCa3cNXePaEbsKkk4PU2QcWjHj2C7yDrjkw;
path=/; expires=Sat, 04-Jan-25 19:52:19 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=SlnUP7AT9jJlQiN.Fm1c7MDyo78_hBRAz8PoabvHVSU-1736018539826-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '7717'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999790'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_08d237d56b0168a0f4512417380485db
http_version: HTTP/1.1
status_code: 200
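The request above maps onto a standard chat-completions call in the `openai` Python client (version 1.52.1 per the recorded `User-Agent`). A minimal sketch, with the long system and user messages abbreviated rather than reproduced from the cassette:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Messages abbreviated; the recorded request carries the full crewAI prompt.
completion = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are Report Writer. ..."},
        {"role": "user", "content": "Current Task: Write a report about AI with exactly 3 key points."},
    ],
    stop=["\nObservation:"],
    stream=False,
)
print(completion.choices[0].message.content)
```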
- request:
body: !!binary |
Cs4CCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSpQIKEgoQY3Jld2FpLnRl
bGVtZXRyeRKOAgoQw9qUJPsh6jiJZX4qW3ry4hIIT7E0SNH7Ub4qDFRhc2sgQ3JlYXRlZDABOQBO
BAmqkxcYQQgdBQmqkxcYSi4KCGNyZXdfa2V5EiIKIDAwYjk0NmJlNDQzNzE0YjNhNDdjMjAxMDFl
YjAyZDY2SjEKB2NyZXdfaWQSJgokNzJkZTEwZTQtNDkwZC00NDYwLTk1NzMtMmU5ZmM5YTMwMWE1
Si4KCHRhc2tfa2V5EiIKIGI3MTNjODJmZWI5MmM5ZjVjNThiNDBhOTc1NTZiN2FjSjEKB3Rhc2tf
aWQSJgokYWYxYTk2MTgtOTI0YS00ZTc5LWI2ZWItNThkYTEzNjk1OWM1egIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '337'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 04 Jan 2025 19:22:22 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Report Writer. You''re
an expert at writing structured reports.\nYour personal goal is: Create properly
formatted reports\nTo give my best complete final answer to the task use the
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
Your final answer must be the great and the most complete as possible, it must
be outcome described.\n\nI MUST use these formats, my job depends on it!"},
{"role": "user", "content": "\nCurrent Task: Write a report about AI with exactly
3 key points.\n\nThis is the expect criteria for your final answer: A properly
formatted report\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\n### Previous attempt
failed validation: Output must start with ''REPORT:'' no formatting, just the
word REPORT\n\n\n### Previous result:\n# Report on Artificial Intelligence (AI)\n\n##
Introduction\nArtificial Intelligence (AI) is a rapidly evolving technology
that simulates human intelligence processes by machines, particularly computer
systems. AI has a profound impact on various sectors, enhancing efficiency,
improving decision-making, and leading to groundbreaking innovations. This report
highlights three key points regarding the significance and implications of AI
technology.\n\n## Key Point 1: Transformative Potential in Various Industries\nAI''s
transformative potential is evident across multiple industries, including healthcare,
finance, transportation, and agriculture. In healthcare, AI algorithms can analyze
complex medical data, leading to improved diagnostics, personalized medicine,
and predictive analytics, thereby enhancing patient outcomes. The financial
sector employs AI for risk management, fraud detection, and automated trading,
which increases operational efficiency and minimizes human error. In transportation,
AI is integral to the development of autonomous vehicles and smart traffic systems,
optimizing routes and reducing congestion. Furthermore, agriculture benefits
from AI applications through precision farming, which maximizes yield while
minimizing environmental impact.\n\n## Key Point 2: Ethical Considerations and
Challenges\nAs AI technologies become more pervasive, ethical considerations
arise regarding their implementation and use. Concerns include data privacy,
algorithmic bias, and the displacement of jobs due to automation. Ensuring that
AI systems are transparent, fair, and accountable is crucial in addressing these
issues. Organizations must develop comprehensive guidelines and regulatory frameworks
to mitigate bias in AI algorithms and protect user data. Moreover, addressing
the social implications of AI, such as potential job displacement, is essential,
necessitating investment in workforce retraining and education to prepare for
an AI-driven economy.\n\n## Key Point 3: Future Directions and Developments\nLooking
ahead, the future of AI promises continued advancements and integration into
everyday life. Emerging trends include the development of explainable AI (XAI),
enhancing interpretability and understanding of AI decision-making processes.
Advances in natural language processing (NLP) will facilitate better human-computer
interactions, allowing for more intuitive applications. Additionally, as AI
technology becomes increasingly sophisticated, its role in addressing global
challenges, such as climate change and healthcare disparities, is expected to
expand. Stakeholders must collaborate to ensure that these developments align
with ethical standards and societal needs, fostering a responsible AI future.\n\n##
Conclusion\nArtificial Intelligence stands at the forefront of technological
innovation, with the potential to revolutionize industries and address complex
global challenges. However, it is imperative to navigate the ethical considerations
and challenges it poses. By fostering responsible AI development, we can harness
its transformative power while ensuring equitability and transparency for future
generations.\n\n\nTry again, making sure to address the validation error.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"], "stream": false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '4351'
content-type:
- application/json
cookie:
- _cfuvid=SlnUP7AT9jJlQiN.Fm1c7MDyo78_hBRAz8PoabvHVSU-1736018539826-0.0.1.1-604800000;
__cf_bm=pumYGlf1gsbVoFNTM1vh9Okj41SgxP3y65T5YWWPU1U-1736018539-1.0.1.1-wmaotkWMviN4lKh6M3P04A8p61Ehm.rTehDpsJhxYhNBNU5.kznMCa3cNXePaEbsKkk4PU2QcWjHj2C7yDrjkw
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-Am40yJsMPHsTOmn9Obwyx2caqoJ1R\",\n \"object\":
\"chat.completion\",\n \"created\": 1736018540,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: \\nREPORT: \\n\\n# Report on Artificial Intelligence (AI)\\n\\n##
Introduction\\nArtificial Intelligence (AI) is a rapidly evolving technology
that simulates human intelligence processes by machines, particularly computer
systems. AI has a profound impact on various sectors, enhancing efficiency,
improving decision-making, and leading to groundbreaking innovations. This report
highlights three key points regarding the significance and implications of AI
technology.\\n\\n## Key Point 1: Transformative Potential in Various Industries\\nAI's
transformative potential is evident across multiple industries, including healthcare,
finance, transportation, and agriculture. In healthcare, AI algorithms can analyze
complex medical data, leading to improved diagnostics, personalized medicine,
and predictive analytics, thereby enhancing patient outcomes. The financial
sector employs AI for risk management, fraud detection, and automated trading,
which increases operational efficiency and minimizes human error. In transportation,
AI is integral to the development of autonomous vehicles and smart traffic systems,
optimizing routes and reducing congestion. Furthermore, agriculture benefits
from AI applications through precision farming, which maximizes yield while
minimizing environmental impact.\\n\\n## Key Point 2: Ethical Considerations
and Challenges\\nAs AI technologies become more pervasive, ethical considerations
arise regarding their implementation and use. Concerns include data privacy,
algorithmic bias, and the displacement of jobs due to automation. Ensuring that
AI systems are transparent, fair, and accountable is crucial in addressing these
issues. Organizations must develop comprehensive guidelines and regulatory frameworks
to mitigate bias in AI algorithms and protect user data. Moreover, addressing
the social implications of AI, such as potential job displacement, is essential,
necessitating investment in workforce retraining and education to prepare for
an AI-driven economy.\\n\\n## Key Point 3: Future Directions and Developments\\nLooking
ahead, the future of AI promises continued advancements and integration into
everyday life. Emerging trends include the development of explainable AI (XAI),
enhancing interpretability and understanding of AI decision-making processes.
Advances in natural language processing (NLP) will facilitate better human-computer
interactions, allowing for more intuitive applications. Additionally, as AI
technology becomes increasingly sophisticated, its role in addressing global
challenges, such as climate change and healthcare disparities, is expected to
expand. Stakeholders must collaborate to ensure that these developments align
with ethical standards and societal needs, fostering a responsible AI future.\\n\\n##
Conclusion\\nArtificial Intelligence stands at the forefront of technological
innovation, with the potential to revolutionize industries and address complex
global challenges. However, it is imperative to navigate the ethical considerations
and challenges it poses. By fostering responsible AI development, we can harness
its transformative power while ensuring equitability and transparency for future
generations.\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
725,\n \"completion_tokens\": 526,\n \"total_tokens\": 1251,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_0aa8d3e20b\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fcd98c269880133-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 04 Jan 2025 19:22:28 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '8620'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149998942'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_de480c9e17954e77dece1b2fe013a0d0
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
Cs4CCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSpQIKEgoQY3Jld2FpLnRl
bGVtZXRyeRKOAgoQCwIBgw9XNdGpuGOOIANe2hIIriM3k2t+0NQqDFRhc2sgQ3JlYXRlZDABOcjF
ABuskxcYQfBlARuskxcYSi4KCGNyZXdfa2V5EiIKIDAwYjk0NmJlNDQzNzE0YjNhNDdjMjAxMDFl
YjAyZDY2SjEKB2NyZXdfaWQSJgokNzJkZTEwZTQtNDkwZC00NDYwLTk1NzMtMmU5ZmM5YTMwMWE1
Si4KCHRhc2tfa2V5EiIKIGI3MTNjODJmZWI5MmM5ZjVjNThiNDBhOTc1NTZiN2FjSjEKB3Rhc2tf
aWQSJgokYWYxYTk2MTgtOTI0YS00ZTc5LWI2ZWItNThkYTEzNjk1OWM1egIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '337'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 04 Jan 2025 19:22:32 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Report Writer. You''re
an expert at writing structured reports.\nYour personal goal is: Create properly
formatted reports\nTo give my best complete final answer to the task use the
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
Your final answer must be the great and the most complete as possible, it must
be outcome described.\n\nI MUST use these formats, my job depends on it!"},
{"role": "user", "content": "\nCurrent Task: Write a report about AI with exactly
3 key points.\n\nThis is the expect criteria for your final answer: A properly
formatted report\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\n### Previous attempt
failed validation: Output must end with ''END REPORT'' no formatting, just the
word END REPORT\n\n\n### Previous result:\nREPORT: \n\n# Report on Artificial
Intelligence (AI)\n\n## Introduction\nArtificial Intelligence (AI) is a rapidly
evolving technology that simulates human intelligence processes by machines,
particularly computer systems. AI has a profound impact on various sectors,
enhancing efficiency, improving decision-making, and leading to groundbreaking
innovations. This report highlights three key points regarding the significance
and implications of AI technology.\n\n## Key Point 1: Transformative Potential
in Various Industries\nAI''s transformative potential is evident across multiple
industries, including healthcare, finance, transportation, and agriculture.
In healthcare, AI algorithms can analyze complex medical data, leading to improved
diagnostics, personalized medicine, and predictive analytics, thereby enhancing
patient outcomes. The financial sector employs AI for risk management, fraud
detection, and automated trading, which increases operational efficiency and
minimizes human error. In transportation, AI is integral to the development
of autonomous vehicles and smart traffic systems, optimizing routes and reducing
congestion. Furthermore, agriculture benefits from AI applications through precision
farming, which maximizes yield while minimizing environmental impact.\n\n##
Key Point 2: Ethical Considerations and Challenges\nAs AI technologies become
more pervasive, ethical considerations arise regarding their implementation
and use. Concerns include data privacy, algorithmic bias, and the displacement
of jobs due to automation. Ensuring that AI systems are transparent, fair, and
accountable is crucial in addressing these issues. Organizations must develop
comprehensive guidelines and regulatory frameworks to mitigate bias in AI algorithms
and protect user data. Moreover, addressing the social implications of AI, such
as potential job displacement, is essential, necessitating investment in workforce
retraining and education to prepare for an AI-driven economy.\n\n## Key Point
3: Future Directions and Developments\nLooking ahead, the future of AI promises
continued advancements and integration into everyday life. Emerging trends include
the development of explainable AI (XAI), enhancing interpretability and understanding
of AI decision-making processes. Advances in natural language processing (NLP)
will facilitate better human-computer interactions, allowing for more intuitive
applications. Additionally, as AI technology becomes increasingly sophisticated,
its role in addressing global challenges, such as climate change and healthcare
disparities, is expected to expand. Stakeholders must collaborate to ensure
that these developments align with ethical standards and societal needs, fostering
a responsible AI future.\n\n## Conclusion\nArtificial Intelligence stands at
the forefront of technological innovation, with the potential to revolutionize
industries and address complex global challenges. However, it is imperative
to navigate the ethical considerations and challenges it poses. By fostering
responsible AI development, we can harness its transformative power while ensuring
equitability and transparency for future generations.\n\n\nTry again, making
sure to address the validation error.\n\nBegin! This is VERY important to you,
use the tools available and give your best Final Answer, your job depends on
it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream":
false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '4369'
content-type:
- application/json
cookie:
- _cfuvid=SlnUP7AT9jJlQiN.Fm1c7MDyo78_hBRAz8PoabvHVSU-1736018539826-0.0.1.1-604800000;
__cf_bm=pumYGlf1gsbVoFNTM1vh9Okj41SgxP3y65T5YWWPU1U-1736018539-1.0.1.1-wmaotkWMviN4lKh6M3P04A8p61Ehm.rTehDpsJhxYhNBNU5.kznMCa3cNXePaEbsKkk4PU2QcWjHj2C7yDrjkw
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-Am4176wzYnk3HmSTkkakM4yl6xVYS\",\n \"object\":
\"chat.completion\",\n \"created\": 1736018549,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: \\n\\n# Report on Artificial Intelligence (AI)\\n\\n## Introduction\\nArtificial
Intelligence (AI) is a revolutionary technology designed to simulate human intelligence
processes, enabling machines to perform tasks that typically require human cognition.
Its rapid development has brought forth significant changes across various sectors,
improving operational efficiencies, enhancing decision-making, and fostering
innovation. This report outlines three key points regarding the impact and implications
of AI technology.\\n\\n## Key Point 1: Transformative Potential in Various Industries\\nAI's
transformative potential is observable across numerous sectors including healthcare,
finance, transportation, and agriculture. In the healthcare sector, AI algorithms
are increasingly used to analyze vast amounts of medical data, which sharpens
diagnostics, facilitates personalized treatment plans, and enhances predictive
analytics, thus leading to better patient care. In finance, AI contributes to
risk assessment, fraud detection, and automated trading, heightening efficiency
and reducing the risk of human error. The transportation industry leverages
AI technologies for developments in autonomous vehicles and smart transportation
systems that optimize routes and alleviate traffic congestion. Furthermore,
agriculture benefits from AI by applying precision farming techniques that optimize
yield and mitigate environmental effects.\\n\\n## Key Point 2: Ethical Considerations
and Challenges\\nWith the increasing deployment of AI technologies, numerous
ethical considerations surface, particularly relating to privacy, algorithmic
fairness, and the displacement of jobs. Addressing issues such as data security,
bias in AI algorithms, and the societal impact of automation is paramount. Organizations
are encouraged to develop stringent guidelines and regulatory measures aimed
at minimizing bias and ensuring that AI systems uphold values of transparency
and accountability. Additionally, the implications of job displacement necessitate
strategies for workforce retraining and educational reforms to adequately prepare
the workforce for an economy increasingly shaped by AI technologies.\\n\\n##
Key Point 3: Future Directions and Developments\\nThe future of AI is poised
for remarkable advancements, with trends indicating a growing integration into
daily life and widespread applications. The emergence of explainable AI (XAI)
aims to enhance the transparency and interpretability of AI decision-making
processes, fostering trust and understanding among users. Improvements in natural
language processing (NLP) are likely to lead to more seamless and intuitive
human-computer interactions. Furthermore, AI's potential to address global challenges,
including climate change and disparities in healthcare access, is becoming increasingly
significant. Collaborative efforts among stakeholders will be vital to ensuring
that AI advancements are ethical and responsive to societal needs, paving the
way for a responsible and equitable AI landscape.\\n\\n## Conclusion\\nAI technology
is at the forefront of innovation, with the capacity to transform industries
and tackle pressing global issues. As we navigate through the complexities and
ethical challenges posed by AI, it is crucial to prioritize responsible development
and implementation. By harnessing AI's transformative capabilities with a focus
on equity and transparency, we can pave the way for a promising future that
benefits all.\\n\\nEND REPORT\",\n \"refusal\": null\n },\n \"logprobs\":
null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
730,\n \"completion_tokens\": 571,\n \"total_tokens\": 1301,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_0aa8d3e20b\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fcd98f9fc060133-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 04 Jan 2025 19:22:36 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '7203'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149998937'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_cab0502e7d8a8564e56d8f741cf451ec
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
Cs4CCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSpQIKEgoQY3Jld2FpLnRl
bGVtZXRyeRKOAgoQO/xpq2/yF233Vf8OitYSiBIIdyOEucIqtF8qDFRhc2sgQ3JlYXRlZDABOXDe
ZdqtkxcYQUDaZ9qtkxcYSi4KCGNyZXdfa2V5EiIKIDAwYjk0NmJlNDQzNzE0YjNhNDdjMjAxMDFl
YjAyZDY2SjEKB2NyZXdfaWQSJgokNzJkZTEwZTQtNDkwZC00NDYwLTk1NzMtMmU5ZmM5YTMwMWE1
Si4KCHRhc2tfa2V5EiIKIGI3MTNjODJmZWI5MmM5ZjVjNThiNDBhOTc1NTZiN2FjSjEKB3Rhc2tf
aWQSJgokYWYxYTk2MTgtOTI0YS00ZTc5LWI2ZWItNThkYTEzNjk1OWM1egIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '337'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.27.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 04 Jan 2025 19:22:37 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Report Writer. You''re
an expert at writing structured reports.\nYour personal goal is: Create properly
formatted reports\nTo give my best complete final answer to the task use the
exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
Your final answer must be the great and the most complete as possible, it must
be outcome described.\n\nI MUST use these formats, my job depends on it!"},
{"role": "user", "content": "\nCurrent Task: Write a report about AI with exactly
3 key points.\n\nThis is the expect criteria for your final answer: A properly
formatted report\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nThis is the context you''re working with:\n### Previous attempt
failed validation: Output must start with ''REPORT:'' no formatting, just the
word REPORT\n\n\n### Previous result:\n# Report on Artificial Intelligence (AI)\n\n##
Introduction\nArtificial Intelligence (AI) is a revolutionary technology designed
to simulate human intelligence processes, enabling machines to perform tasks
that typically require human cognition. Its rapid development has brought forth
significant changes across various sectors, improving operational efficiencies,
enhancing decision-making, and fostering innovation. This report outlines three
key points regarding the impact and implications of AI technology.\n\n## Key
Point 1: Transformative Potential in Various Industries\nAI''s transformative
potential is observable across numerous sectors including healthcare, finance,
transportation, and agriculture. In the healthcare sector, AI algorithms are
increasingly used to analyze vast amounts of medical data, which sharpens diagnostics,
facilitates personalized treatment plans, and enhances predictive analytics,
thus leading to better patient care. In finance, AI contributes to risk assessment,
fraud detection, and automated trading, heightening efficiency and reducing
the risk of human error. The transportation industry leverages AI technologies
for developments in autonomous vehicles and smart transportation systems that
optimize routes and alleviate traffic congestion. Furthermore, agriculture benefits
from AI by applying precision farming techniques that optimize yield and mitigate
environmental effects.\n\n## Key Point 2: Ethical Considerations and Challenges\nWith
the increasing deployment of AI technologies, numerous ethical considerations
surface, particularly relating to privacy, algorithmic fairness, and the displacement
of jobs. Addressing issues such as data security, bias in AI algorithms, and
the societal impact of automation is paramount. Organizations are encouraged
to develop stringent guidelines and regulatory measures aimed at minimizing
bias and ensuring that AI systems uphold values of transparency and accountability.
Additionally, the implications of job displacement necessitate strategies for
workforce retraining and educational reforms to adequately prepare the workforce
for an economy increasingly shaped by AI technologies.\n\n## Key Point 3: Future
Directions and Developments\nThe future of AI is poised for remarkable advancements,
with trends indicating a growing integration into daily life and widespread
applications. The emergence of explainable AI (XAI) aims to enhance the transparency
and interpretability of AI decision-making processes, fostering trust and understanding
among users. Improvements in natural language processing (NLP) are likely to
lead to more seamless and intuitive human-computer interactions. Furthermore,
AI''s potential to address global challenges, including climate change and disparities
in healthcare access, is becoming increasingly significant. Collaborative efforts
among stakeholders will be vital to ensuring that AI advancements are ethical
and responsive to societal needs, paving the way for a responsible and equitable
AI landscape.\n\n## Conclusion\nAI technology is at the forefront of innovation,
with the capacity to transform industries and tackle pressing global issues.
As we navigate through the complexities and ethical challenges posed by AI,
it is crucial to prioritize responsible development and implementation. By harnessing
AI''s transformative capabilities with a focus on equity and transparency, we
can pave the way for a promising future that benefits all.\n\nEND REPORT\n\n\nTry
again, making sure to address the validation error.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream":
false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '4669'
content-type:
- application/json
cookie:
- _cfuvid=SlnUP7AT9jJlQiN.Fm1c7MDyo78_hBRAz8PoabvHVSU-1736018539826-0.0.1.1-604800000;
__cf_bm=pumYGlf1gsbVoFNTM1vh9Okj41SgxP3y65T5YWWPU1U-1736018539-1.0.1.1-wmaotkWMviN4lKh6M3P04A8p61Ehm.rTehDpsJhxYhNBNU5.kznMCa3cNXePaEbsKkk4PU2QcWjHj2C7yDrjkw
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-Am41EaJaKZSumZe8ph2I32d6QNbTP\",\n \"object\":
\"chat.completion\",\n \"created\": 1736018556,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: \\n\\nREPORT: \\n\\n# Report on Artificial Intelligence (AI)\\n\\n##
Introduction\\nArtificial Intelligence (AI) is a revolutionary technology designed
to simulate human intelligence processes, enabling machines to perform tasks
that typically require human cognition. Its rapid development has brought forth
significant changes across various sectors, improving operational efficiencies,
enhancing decision-making, and fostering innovation. This report outlines three
key points regarding the impact and implications of AI technology.\\n\\n## Key
Point 1: Transformative Potential in Various Industries\\nAI's transformative
potential is observable across numerous sectors including healthcare, finance,
transportation, and agriculture. In the healthcare sector, AI algorithms are
increasingly used to analyze vast amounts of medical data, which sharpens diagnostics,
facilitates personalized treatment plans, and enhances predictive analytics,
thus leading to better patient care. In finance, AI contributes to risk assessment,
fraud detection, and automated trading, heightening efficiency and reducing
the risk of human error. The transportation industry leverages AI technologies
for developments in autonomous vehicles and smart transportation systems that
optimize routes and alleviate traffic congestion. Furthermore, agriculture benefits
from AI by applying precision farming techniques that optimize yield and mitigate
environmental effects.\\n\\n## Key Point 2: Ethical Considerations and Challenges\\nWith
the increasing deployment of AI technologies, numerous ethical considerations
surface, particularly relating to privacy, algorithmic fairness, and the displacement
of jobs. Addressing issues such as data security, bias in AI algorithms, and
the societal impact of automation is paramount. Organizations are encouraged
to develop stringent guidelines and regulatory measures aimed at minimizing
bias and ensuring that AI systems uphold values of transparency and accountability.
Additionally, the implications of job displacement necessitate strategies for
workforce retraining and educational reforms to adequately prepare the workforce
for an economy increasingly shaped by AI technologies.\\n\\n## Key Point 3:
Future Directions and Developments\\nThe future of AI is poised for remarkable
advancements, with trends indicating a growing integration into daily life and
widespread applications. The emergence of explainable AI (XAI) aims to enhance
the transparency and interpretability of AI decision-making processes, fostering
trust and understanding among users. Improvements in natural language processing
(NLP) are likely to lead to more seamless and intuitive human-computer interactions.
Furthermore, AI's potential to address global challenges, including climate
change and disparities in healthcare access, is becoming increasingly significant.
Collaborative efforts among stakeholders will be vital to ensuring that AI advancements
are ethical and responsive to societal needs, paving the way for a responsible
and equitable AI landscape.\\n\\n## Conclusion\\nAI technology is at the forefront
of innovation, with the capacity to transform industries and tackle pressing
global issues. As we navigate through the complexities and ethical challenges
posed by AI, it is crucial to prioritize responsible development and implementation.
By harnessing AI's transformative capabilities with a focus on equity and transparency,
we can pave the way for a promising future that benefits all.\\n\\nEND REPORT\",\n
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 774,\n \"completion_tokens\":
574,\n \"total_tokens\": 1348,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_0aa8d3e20b\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fcd9928eaa40133-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 04 Jan 2025 19:22:46 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '9767'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149998862'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d3d0e47180363d07d988cb5ab639597c
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,35 +0,0 @@
interactions:
- request:
body: '{"model": "gemma2:latest", "prompt": "### User:\nRespond in 20 words. Who
are you?\n\n", "options": {"num_predict": 30, "temperature": 0.7}, "stream":
false}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '157'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: http://localhost:8080/api/generate
response:
body:
string: '{"model":"gemma2:latest","created_at":"2024-09-24T21:57:52.329049Z","response":"I
am Gemma, an open-weights AI assistant trained by Google DeepMind. \n","done":true,"done_reason":"stop","context":[106,1645,108,6176,4926,235292,108,54657,575,235248,235284,235276,3907,235265,7702,708,692,235336,109,107,108,106,2516,108,235285,1144,137061,235269,671,2174,235290,30316,16481,20409,17363,731,6238,20555,35777,235265,139,108],"total_duration":991843667,"load_duration":31664750,"prompt_eval_count":25,"prompt_eval_duration":51409000,"eval_count":19,"eval_duration":908132000}'
headers:
Content-Length:
- '572'
Content-Type:
- application/json; charset=utf-8
Date:
- Tue, 24 Sep 2024 21:57:52 GMT
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,36 @@
interactions:
- request:
body: '{"model": "llama3.2:3b", "prompt": "### User:\nRespond in 20 words. Which
model are you??\n\n", "options": {"num_predict": 30, "temperature": 0.7}, "stream":
false}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '164'
Content-Type:
- application/json
User-Agent:
- python-requests/2.32.3
method: POST
uri: http://localhost:11434/api/generate
response:
body:
string: '{"model":"llama3.2:3b","created_at":"2025-01-02T20:24:24.812595Z","response":"I''m
an AI, specifically a large language model, designed to understand and respond
to user queries with accuracy.","done":true,"done_reason":"stop","context":[128006,9125,128007,271,38766,1303,33025,2696,25,6790,220,2366,18,271,128009,128006,882,128007,271,14711,2724,512,66454,304,220,508,4339,13,16299,1646,527,499,71291,128009,128006,78191,128007,271,40,2846,459,15592,11,11951,264,3544,4221,1646,11,6319,311,3619,323,6013,311,1217,20126,449,13708,13],"total_duration":827817584,"load_duration":41560542,"prompt_eval_count":39,"prompt_eval_duration":384000000,"eval_count":23,"eval_duration":400000000}'
headers:
Content-Length:
- '683'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 02 Jan 2025 20:24:24 GMT
status:
code: 200
message: OK
version: 1
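
For context, the cassette above records nothing more exotic than a plain POST to Ollama's local /api/generate endpoint, which is roughly what the LLM call resolves to when pointed at the base_url http://localhost:11434. A minimal sketch that reproduces the same request outside the test suite (assuming a local Ollama server with the llama3.2:3b model already pulled) could look like:

    import requests

    # Mirror the recorded request body: model, rendered prompt, sampling options.
    payload = {
        "model": "llama3.2:3b",
        "prompt": "### User:\nRespond in 20 words. Which model are you?\n\n",
        "options": {"num_predict": 30, "temperature": 0.7},
        "stream": False,
    }
    resp = requests.post("http://localhost:11434/api/generate", json=payload, timeout=60)
    resp.raise_for_status()
    print(resp.json()["response"])  # the generated text stored in the cassette body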

View File

@@ -0,0 +1,146 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Researcher. You''re
an expert researcher, specialized in technology, software engineering, AI and
startups. You work as a freelancer and is now working on doing research and
analysis for a new customer.\nYour personal goal is: Make the best research
and analysis on content about AI and AI agents\nTo give my best complete final
answer to the task use the exact following format:\n\nThought: I now can give
a great answer\nFinal Answer: Your final answer must be the great and the most
complete as possible, it must be outcome described.\n\nI MUST use these formats,
my job depends on it!"}, {"role": "user", "content": "\nCurrent Task: Give me
a list of 5 interesting ideas to explore for na article, what makes them unique
and interesting.\n\nThis is the expect criteria for your final answer: Bullet
point list of 5 interesting ideas.\nyou MUST return the actual complete content
as the final answer, not a summary.\n\nBegin! This is VERY important to you,
use the tools available and give your best Final Answer, your job depends on
it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"], "stream":
false}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1177'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.52.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.52.1
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AlfwrGToOoVtDhb3ryZMpA07aZy4m\",\n \"object\":
\"chat.completion\",\n \"created\": 1735926029,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
Answer: \\n- **The Role of Emotional Intelligence in AI Agents**: Explore how
developing emotional intelligence in AI can change user interactions. Investigate
algorithms that enable AI agents to recognize and respond to human emotions,
enhancing user experience in sectors such as therapy, customer service, and
education. This idea is unique as it blends psychology with artificial intelligence,
presenting a new frontier for AI applications.\\n\\n- **AI Agents in Problem-Solving
for Climate Change**: Analyze how AI agents can contribute to developing innovative
solutions for climate change challenges. Focus on their role in predicting climate
patterns, optimizing energy consumption, and managing resources more efficiently.
This topic is unique because it highlights the practical impact of AI on one
of the most pressing global issues.\\n\\n- **The Ethics of Autonomous Decision-Making
AI**: Delve into the ethical implications surrounding AI agents that make autonomous
decisions, especially in critical areas like healthcare, transportation, and
law enforcement. This idea raises questions about accountability and bias, making
it a vital discussion point as AI continues to advance. The unique aspect lies
in the intersection of technology and moral philosophy.\\n\\n- **AI Agents Shaping
the Future of Remote Work**: Investigate how AI agents are transforming remote
work environments through automation, communication facilitation, and performance
monitoring. Discuss unique applications such as virtual assistants, project
management tools, and AI-driven team collaboration platforms. This topic is
particularly relevant as the workforce becomes increasingly remote, making it
an appealing area of exploration.\\n\\n- **Cultural Impacts of AI Agents in
Media and Entertainment**: Examine how AI-driven characters and narratives are
changing the media landscape, from video games to films and animations. Analyze
audience reception and the role of AI in personalizing content. This concept
is unique due to its intersection with digital culture and artistic expression,
offering insights into how technology influences social norms and preferences.\",\n
\ \"refusal\": null\n },\n \"logprobs\": null,\n \"finish_reason\":
\"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 220,\n \"completion_tokens\":
376,\n \"total_tokens\": 596,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"system_fingerprint\":
\"fp_0aa8d3e20b\"\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8fc4c6324d42ad5a-POA
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 03 Jan 2025 17:40:34 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=zdRUS9YIvR7oCmJGeB7BOAnmxI7FOE5Jae5yRZDCnPE-1735926034-1.0.1.1-gvIEXrMfT69wL2mv4ApivWX67OOpDegjf1LE6g9u3GEDuQdLQok.vlLZD.SdGzK0bMug86JZhBeDZMleJlI2EQ;
path=/; expires=Fri, 03-Jan-25 18:10:34 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=CW_cKQGYWY3cL.S6Xo5z0cmkmWHy5Q50OA_KjPEijNk-1735926034530-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '5124'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999729'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_95ae59da1099e02c0d95bf25ba179fed
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -177,12 +177,12 @@ class TestDeployCommand(unittest.TestCase):
def test_get_crew_status(self):
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"name": "TestCrew", "status": "active"}
mock_response.json.return_value = {"name": "InternalCrew", "status": "active"}
self.mock_client.crew_status_by_name.return_value = mock_response
with patch("sys.stdout", new=StringIO()) as fake_out:
self.deploy_command.get_crew_status()
self.assertIn("TestCrew", fake_out.getvalue())
self.assertIn("InternalCrew", fake_out.getvalue())
self.assertIn("active", fake_out.getvalue())
def test_get_crew_logs(self):

View File

@@ -28,9 +28,10 @@ def test_create_success(mock_subprocess):
with in_temp_dir():
tool_command = ToolCommand()
with patch.object(tool_command, "login") as mock_login, patch(
"sys.stdout", new=StringIO()
) as fake_out:
with (
patch.object(tool_command, "login") as mock_login,
patch("sys.stdout", new=StringIO()) as fake_out,
):
tool_command.create("test-tool")
output = fake_out.getvalue()
@@ -82,7 +83,7 @@ def test_install_success(mock_get, mock_subprocess_run):
capture_output=False,
text=True,
check=True,
env=unittest.mock.ANY
env=unittest.mock.ANY,
)
assert "Successfully installed sample-tool" in output

View File

@@ -333,16 +333,16 @@ def test_manager_agent_delegating_to_assigned_task_agent():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task
task.output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Verify execute_sync was called once
@@ -350,12 +350,20 @@ def test_manager_agent_delegating_to_assigned_task_agent():
# Get the tools argument from the call
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the delegation tools were passed correctly
assert len(tools) == 2
assert any("Delegate a specific task to one of the following coworkers: Researcher" in tool.description for tool in tools)
assert any("Ask a specific question to one of the following coworkers: Researcher" in tool.description for tool in tools)
assert any(
"Delegate a specific task to one of the following coworkers: Researcher"
in tool.description
for tool in tools
)
assert any(
"Ask a specific question to one of the following coworkers: Researcher"
in tool.description
for tool in tools
)
@pytest.mark.vcr(filter_headers=["authorization"])
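
Most of the reworked assertions in this file follow one inspection pattern: patch the task-execution method, kick off the crew, then unpack call_args from the mock to check which tools were actually passed. A self-contained sketch of that pattern, with hypothetical Service/kickoff stand-ins rather than the crewAI classes:

    from unittest.mock import patch

    class Service:
        def run(self, *, tools):
            return f"ran with {len(tools)} tools"

    def kickoff(service):
        # The code under test chooses the tools; the test only observes them.
        return service.run(tools=["delegate_work", "ask_question"])

    with patch.object(Service, "run", return_value="mocked output") as mock_run:
        kickoff(Service())

    mock_run.assert_called_once()
    _, kwargs = mock_run.call_args  # call_args unpacks as (args, kwargs)
    assert kwargs["tools"] == ["delegate_work", "ask_question"]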
@@ -404,7 +412,7 @@ def test_manager_agent_delegates_with_varied_role_cases():
backstory="A researcher with spaces in role name",
allow_delegation=False,
)
writer_caps = Agent(
role="SENIOR WRITER", # All caps
goal="Write with caps in role",
@@ -426,13 +434,13 @@ def test_manager_agent_delegates_with_varied_role_cases():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
task.output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Verify execute_sync was called once
@@ -440,20 +448,32 @@ def test_manager_agent_delegates_with_varied_role_cases():
# Get the tools argument from the call
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the delegation tools were passed correctly and can handle case/whitespace variations
assert len(tools) == 2
# Check delegation tool descriptions (should work despite case/whitespace differences)
delegation_tool = tools[0]
question_tool = tools[1]
assert "Delegate a specific task to one of the following coworkers:" in delegation_tool.description
assert " Researcher " in delegation_tool.description or "SENIOR WRITER" in delegation_tool.description
assert "Ask a specific question to one of the following coworkers:" in question_tool.description
assert " Researcher " in question_tool.description or "SENIOR WRITER" in question_tool.description
assert (
"Delegate a specific task to one of the following coworkers:"
in delegation_tool.description
)
assert (
" Researcher " in delegation_tool.description
or "SENIOR WRITER" in delegation_tool.description
)
assert (
"Ask a specific question to one of the following coworkers:"
in question_tool.description
)
assert (
" Researcher " in question_tool.description
or "SENIOR WRITER" in question_tool.description
)
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -479,6 +499,7 @@ def test_crew_with_delegating_agents():
== "In the rapidly evolving landscape of technology, AI agents have emerged as formidable tools, revolutionizing how we interact with data and automate tasks. These sophisticated systems leverage machine learning and natural language processing to perform a myriad of functions, from virtual personal assistants to complex decision-making companions in industries such as finance, healthcare, and education. By mimicking human intelligence, AI agents can analyze massive data sets at unparalleled speeds, enabling businesses to uncover valuable insights, enhance productivity, and elevate user experiences to unprecedented levels.\n\nOne of the most striking aspects of AI agents is their adaptability; they learn from their interactions and continuously improve their performance over time. This feature is particularly valuable in customer service where AI agents can address inquiries, resolve issues, and provide personalized recommendations without the limitations of human fatigue. Moreover, with intuitive interfaces, AI agents enhance user interactions, making technology more accessible and user-friendly, thereby breaking down barriers that have historically hindered digital engagement.\n\nDespite their immense potential, the deployment of AI agents raises important ethical and practical considerations. Issues related to privacy, data security, and the potential for job displacement necessitate thoughtful dialogue and proactive measures. Striking a balance between technological innovation and societal impact will be crucial as organizations integrate these agents into their operations. Additionally, ensuring transparency in AI decision-making processes is vital to maintain public trust as AI agents become an integral part of daily life.\n\nLooking ahead, the future of AI agents appears bright, with ongoing advancements promising even greater capabilities. As we continue to harness the power of AI, we can expect these agents to play a transformative role in shaping various sectors—streamlining workflows, enabling smarter decision-making, and fostering more personalized experiences. Embracing this technology responsibly can lead to a future where AI agents not only augment human effort but also inspire creativity and efficiency across the board, ultimately redefining our interaction with the digital world."
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_task_tools():
from typing import Type
@@ -489,6 +510,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
class TestToolInput(BaseModel):
"""Input schema for TestTool."""
query: str = Field(..., description="Query to process")
class TestTool(BaseTool):
@@ -516,24 +538,29 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task
tasks[0].output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Execute the task and verify both tools are present
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
assert any(
isinstance(tool, TestTool) for tool in tools
), "TestTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in tools
), "Delegation tool should be present"
assert any(isinstance(tool, TestTool) for tool in tools), "TestTool should be present"
assert any("delegate" in tool.name.lower() for tool in tools), "Delegation tool should be present"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents_should_not_override_agent_tools():
@@ -545,6 +572,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
class TestToolInput(BaseModel):
"""Input schema for TestTool."""
query: str = Field(..., description="Query to process")
class TestTool(BaseTool):
@@ -563,7 +591,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
Task(
description="Produce and amazing 1 paragraph draft of an article about AI Agents.",
expected_output="A 4 paragraph article about AI.",
agent=new_ceo
agent=new_ceo,
)
]
@@ -574,24 +602,29 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task
tasks[0].output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Execute the task and verify both tools are present
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
assert any(
isinstance(tool, TestTool) for tool in new_ceo.tools
), "TestTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in tools
), "Delegation tool should be present"
assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), "TestTool should be present"
assert any("delegate" in tool.name.lower() for tool in tools), "Delegation tool should be present"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools():
@@ -603,6 +636,7 @@ def test_task_tools_override_agent_tools():
class TestToolInput(BaseModel):
"""Input schema for TestTool."""
query: str = Field(..., description="Query to process")
class TestTool(BaseTool):
@@ -630,14 +664,10 @@ def test_task_tools_override_agent_tools():
description="Write a test task",
expected_output="Test output",
agent=new_researcher,
tools=[AnotherTestTool()]
tools=[AnotherTestTool()],
)
crew = Crew(
agents=[new_researcher],
tasks=[task],
process=Process.sequential
)
crew = Crew(agents=[new_researcher], tasks=[task], process=Process.sequential)
crew.kickoff()
@@ -650,6 +680,7 @@ def test_task_tools_override_agent_tools():
assert len(new_researcher.tools) == 1
assert isinstance(new_researcher.tools[0], TestTool)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_tools_override_agent_tools_with_allow_delegation():
"""
@@ -702,13 +733,13 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# We mock execute_sync to verify which tools get used at runtime
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Inspect the call kwargs to verify the actual tools passed to execution
@@ -716,16 +747,23 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
used_tools = kwargs["tools"]
# Confirm AnotherTestTool is present but TestTool is not
assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), "AnotherTestTool should be present"
assert not any(isinstance(tool, TestTool) for tool in used_tools), "TestTool should not be present among used tools"
assert any(
isinstance(tool, AnotherTestTool) for tool in used_tools
), "AnotherTestTool should be present"
assert not any(
isinstance(tool, TestTool) for tool in used_tools
), "TestTool should not be present among used tools"
# Confirm delegation tool(s) are present
assert any("delegate" in tool.name.lower() for tool in used_tools), "Delegation tool should be present"
assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"
# Finally, make sure the agent's original tools remain unchanged
assert len(researcher_with_delegation.tools) == 1
assert isinstance(researcher_with_delegation.tools[0], TestTool)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_verbose_output(capsys):
tasks = [
@@ -1012,8 +1050,8 @@ def test_three_task_with_async_execution():
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
@pytest.mark.vcr(filter_headers=["authorization"])
async def test_crew_async_kickoff():
inputs = [
{"topic": "dog"},
@@ -1060,8 +1098,9 @@ async def test_crew_async_kickoff():
assert result[0].token_usage.successful_requests > 0 # type: ignore
@pytest.mark.asyncio
@pytest.mark.vcr(filter_headers=["authorization"])
def test_async_task_execution_call_count():
async def test_async_task_execution_call_count():
from unittest.mock import MagicMock, patch
list_ideas = Task(
@@ -1188,7 +1227,6 @@ def test_kickoff_for_each_empty_input():
assert results == []
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
"""Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -1211,7 +1249,6 @@ def test_kickoff_for_each_invalid_input():
crew.kickoff_for_each("invalid input")
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_error_handling():
"""Tests error handling in kickoff_for_each when kickoff raises an error."""
from unittest.mock import patch
@@ -1248,7 +1285,6 @@ def test_kickoff_for_each_error_handling():
crew.kickoff_for_each(inputs=inputs)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_kickoff_async_basic_functionality_and_output():
"""Tests the basic functionality and output of kickoff_async."""
@@ -1283,7 +1319,6 @@ async def test_kickoff_async_basic_functionality_and_output():
mock_kickoff.assert_called_once_with(inputs)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_basic_functionality_and_output():
"""Tests the basic functionality and output of kickoff_for_each_async."""
@@ -1330,7 +1365,6 @@ async def test_async_kickoff_for_each_async_basic_functionality_and_output():
mock_kickoff_async.assert_any_call(inputs=input_data)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_async_kickoff_for_each_async_empty_input():
"""Tests if akickoff_for_each_async handles an empty input list."""
@@ -1514,12 +1548,12 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
crew = Crew(agents=[programmer], tasks=[task])
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Get the tools that were actually used in execution
@@ -1528,7 +1562,10 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
# Verify that exactly one tool was used and it was a CodeInterpreterTool
assert len(used_tools) == 1, "Should have exactly one tool"
assert isinstance(used_tools[0], CodeInterpreterTool), "Tool should be CodeInterpreterTool"
assert isinstance(
used_tools[0], CodeInterpreterTool
), "Tool should be CodeInterpreterTool"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegation_is_not_enabled_if_there_are_only_one_agent():
@@ -1639,16 +1676,16 @@ def test_hierarchical_crew_creation_tasks_with_agents():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
# which sets the output attribute of the task
task.output = mock_task_output
with patch.object(Task, 'execute_sync', return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Verify execute_sync was called once
@@ -1656,12 +1693,20 @@ def test_hierarchical_crew_creation_tasks_with_agents():
# Get the tools argument from the call
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the delegation tools were passed correctly
assert len(tools) == 2
assert any("Delegate a specific task to one of the following coworkers: Senior Writer" in tool.description for tool in tools)
assert any("Ask a specific question to one of the following coworkers: Senior Writer" in tool.description for tool in tools)
assert any(
"Delegate a specific task to one of the following coworkers: Senior Writer"
in tool.description
for tool in tools
)
assert any(
"Ask a specific question to one of the following coworkers: Senior Writer"
in tool.description
for tool in tools
)
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1684,9 +1729,7 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Create a mock Future that returns our TaskOutput
@@ -1697,7 +1740,9 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
# which sets the output attribute of the task
task.output = mock_task_output
with patch.object(Task, 'execute_async', return_value=mock_future) as mock_execute_async:
with patch.object(
Task, "execute_async", return_value=mock_future
) as mock_execute_async:
crew.kickoff()
# Verify execute_async was called once
@@ -1705,12 +1750,20 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
# Get the tools argument from the call
_, kwargs = mock_execute_async.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the delegation tools were passed correctly
assert len(tools) == 2
assert any("Delegate a specific task to one of the following coworkers: Senior Writer\n" in tool.description for tool in tools)
assert any("Ask a specific question to one of the following coworkers: Senior Writer\n" in tool.description for tool in tools)
assert any(
"Delegate a specific task to one of the following coworkers: Senior Writer\n"
in tool.description
for tool in tools
)
assert any(
"Ask a specific question to one of the following coworkers: Senior Writer\n"
in tool.description
for tool in tools
)
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -2039,7 +2092,6 @@ def test_crew_output_file_end_to_end(tmp_path):
assert expected_file.exists(), f"Output file {expected_file} was not created"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_output_file_validation_failures():
"""Test output file validation failures in a crew context."""
agent = Agent(
@@ -2055,7 +2107,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data",
expected_output="Analysis results",
agent=agent,
output_file="../output.txt"
output_file="../output.txt",
)
Crew(agents=[agent], tasks=[task]).kickoff()
@@ -2065,7 +2117,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data",
expected_output="Analysis results",
agent=agent,
output_file="output.txt | rm -rf /"
output_file="output.txt | rm -rf /",
)
Crew(agents=[agent], tasks=[task]).kickoff()
@@ -2075,7 +2127,7 @@ def test_crew_output_file_validation_failures():
description="Analyze data",
expected_output="Analysis results",
agent=agent,
output_file="~/output.txt"
output_file="~/output.txt",
)
Crew(agents=[agent], tasks=[task]).kickoff()
@@ -2085,12 +2137,11 @@ def test_crew_output_file_validation_failures():
description="Analyze data",
expected_output="Analysis results",
agent=agent,
output_file="{invalid-name}/output.txt"
output_file="{invalid-name}/output.txt",
)
Crew(agents=[agent], tasks=[task]).kickoff()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_manager_agent():
from unittest.mock import patch
@@ -3049,6 +3100,7 @@ def test_task_tools_preserve_code_execution_tools():
class TestToolInput(BaseModel):
"""Input schema for TestTool."""
query: str = Field(..., description="Query to process")
class TestTool(BaseTool):
@@ -3082,7 +3134,7 @@ def test_task_tools_preserve_code_execution_tools():
description="Write a program to calculate fibonacci numbers.",
expected_output="A working fibonacci calculator.",
agent=programmer,
tools=[TestTool()]
tools=[TestTool()],
)
crew = Crew(
@@ -3092,12 +3144,12 @@ def test_task_tools_preserve_code_execution_tools():
)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Get the tools that were actually used in execution
@@ -3105,12 +3157,21 @@ def test_task_tools_preserve_code_execution_tools():
used_tools = kwargs["tools"]
# Verify all expected tools are present
assert any(isinstance(tool, TestTool) for tool in used_tools), "Task's TestTool should be present"
assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), "CodeInterpreterTool should be present"
assert any("delegate" in tool.name.lower() for tool in used_tools), "Delegation tool should be present"
assert any(
isinstance(tool, TestTool) for tool in used_tools
), "Task's TestTool should be present"
assert any(
isinstance(tool, CodeInterpreterTool) for tool in used_tools
), "CodeInterpreterTool should be present"
assert any(
"delegate" in tool.name.lower() for tool in used_tools
), "Delegation tool should be present"
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
assert len(used_tools) == 4, "Should have TestTool, CodeInterpreter, and 2 delegation tools"
assert (
len(used_tools) == 4
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_flag_adds_multimodal_tools():
@@ -3139,13 +3200,13 @@ def test_multimodal_flag_adds_multimodal_tools():
crew = Crew(agents=[multimodal_agent], tasks=[task], process=Process.sequential)
mock_task_output = TaskOutput(
description="Mock description",
raw="mocked output",
agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent"
)
# Mock execute_sync to verify the tools passed at runtime
with patch.object(Task, "execute_sync", return_value=mock_task_output) as mock_execute_sync:
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Get the tools that were actually used in execution
@@ -3153,13 +3214,14 @@ def test_multimodal_flag_adds_multimodal_tools():
used_tools = kwargs["tools"]
# Check that the multimodal tool was added
assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
"AddImageTool should be present when agent is multimodal"
)
assert any(
isinstance(tool, AddImageTool) for tool in used_tools
), "AddImageTool should be present when agent is multimodal"
# Verify we have exactly one tool (just the AddImageTool)
assert len(used_tools) == 1, "Should only have the AddImageTool"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_image_tool_handling():
"""
@@ -3201,10 +3263,10 @@ def test_multimodal_agent_image_tool_handling():
mock_task_output = TaskOutput(
description="Mock description",
raw="A detailed analysis of the image",
agent="Image Analyst"
agent="Image Analyst",
)
with patch.object(Task, 'execute_sync') as mock_execute_sync:
with patch.object(Task, "execute_sync") as mock_execute_sync:
# Set up the mock to return our task output
mock_execute_sync.return_value = mock_task_output
@@ -3213,7 +3275,7 @@ def test_multimodal_agent_image_tool_handling():
# Get the tools that were passed to execute_sync
_, kwargs = mock_execute_sync.call_args
tools = kwargs['tools']
tools = kwargs["tools"]
# Verify the AddImageTool is present and properly configured
image_tools = [tool for tool in tools if tool.name == "Add image to content"]
@@ -3223,7 +3285,7 @@ def test_multimodal_agent_image_tool_handling():
image_tool = image_tools[0]
result = image_tool._run(
image_url="https://example.com/test-image.jpg",
action="Please analyze this image"
action="Please analyze this image",
)
# Verify the tool returns the expected format
@@ -3233,6 +3295,7 @@ def test_multimodal_agent_image_tool_handling():
assert result["content"][0]["type"] == "text"
assert result["content"][1]["type"] == "image_url"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_live_image_analysis():
"""
@@ -3246,7 +3309,7 @@ def test_multimodal_agent_live_image_analysis():
allow_delegation=False,
multimodal=True,
verbose=True,
llm="gpt-4o"
llm="gpt-4o",
)
# Create a task for image analysis
@@ -3257,21 +3320,127 @@ def test_multimodal_agent_live_image_analysis():
Image: {image_url}
""",
expected_output="A comprehensive description of the image contents.",
agent=image_analyst
agent=image_analyst,
)
# Create and run the crew
crew = Crew(
agents=[image_analyst],
tasks=[analyze_image]
)
crew = Crew(agents=[image_analyst], tasks=[analyze_image])
# Execute with an image URL
result = crew.kickoff(inputs={
"image_url": "https://media.istockphoto.com/id/946087016/photo/aerial-view-of-lower-manhattan-new-york.jpg?s=612x612&w=0&k=20&c=viLiMRznQ8v5LzKTt_LvtfPFUVl1oiyiemVdSlm29_k="
})
result = crew.kickoff(
inputs={
"image_url": "https://media.istockphoto.com/id/946087016/photo/aerial-view-of-lower-manhattan-new-york.jpg?s=612x612&w=0&k=20&c=viLiMRznQ8v5LzKTt_LvtfPFUVl1oiyiemVdSlm29_k="
}
)
# Verify we got a meaningful response
assert isinstance(result.raw, str)
assert len(result.raw) > 100 # Expecting a detailed analysis
assert "error" not in result.raw.lower() # No error messages in response
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_failing_task_guardrails():
"""Test that crew properly handles failing guardrails and retries with validation feedback."""
def strict_format_guardrail(result: TaskOutput):
"""Validates that the output follows a strict format:
- Must start with 'REPORT:'
- Must end with 'END REPORT'
"""
content = result.raw.strip()
if not ('REPORT:' in content or '**REPORT:**' in content):
return (False, "Output must start with 'REPORT:' no formatting, just the word REPORT")
if not ('END REPORT' in content or '**END REPORT**' in content):
return (False, "Output must end with 'END REPORT' no formatting, just the word END REPORT")
return (True, content)
researcher = Agent(
role="Report Writer",
goal="Create properly formatted reports",
backstory="You're an expert at writing structured reports.",
)
task = Task(
description="""Write a report about AI with exactly 3 key points.""",
expected_output="A properly formatted report",
agent=researcher,
guardrail=strict_format_guardrail,
max_retries=3
)
crew = Crew(
agents=[researcher],
tasks=[task],
)
result = crew.kickoff()
# Verify the final output meets all format requirements
content = result.raw.strip()
assert content.startswith('REPORT:'), "Output should start with 'REPORT:'"
assert content.endswith('END REPORT'), "Output should end with 'END REPORT'"
# Verify task output
task_output = result.tasks_output[0]
assert isinstance(task_output, TaskOutput)
assert task_output.raw == result.raw
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_guardrail_feedback_in_context():
"""Test that guardrail feedback is properly appended to task context for retries."""
def format_guardrail(result: TaskOutput):
"""Validates that the output contains a specific keyword."""
if "IMPORTANT" not in result.raw:
return (False, "Output must contain the keyword 'IMPORTANT'")
return (True, result.raw)
# Create execution contexts list to track contexts
execution_contexts = []
researcher = Agent(
role="Writer",
goal="Write content with specific keywords",
backstory="You're an expert at following specific writing requirements.",
allow_delegation=False
)
task = Task(
description="Write a short response.",
expected_output="A response containing the keyword 'IMPORTANT'",
agent=researcher,
guardrail=format_guardrail,
max_retries=2
)
crew = Crew(agents=[researcher], tasks=[task])
with patch.object(Agent, "execute_task") as mock_execute_task:
# Define side_effect to capture context and return different responses
def side_effect(task, context=None, tools=None):
execution_contexts.append(context if context else "")
if len(execution_contexts) == 1:
return "This is a test response"
return "This is an IMPORTANT test response"
mock_execute_task.side_effect = side_effect
result = crew.kickoff()
# Verify that we had multiple executions
assert len(execution_contexts) > 1, "Task should have been executed multiple times"
# Verify that the second execution included the guardrail feedback
assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], \
"Guardrail feedback should be included in retry context"
# Verify final output meets guardrail requirements
assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
# Verify task retry count
assert task.retry_count == 1, "Task should have been retried once"
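Editor's note (not part of the diff): the two guardrail tests above exercise a single contract — a plain callable that receives the task's TaskOutput and returns a (passed, value) tuple, where a failure message is appended to the retry context and a success value becomes the final output. A minimal sketch of that contract as these tests use it (the helper name is illustrative):
from crewai import Task


def require_keyword(result):
    """Illustrative guardrail; `result` is the TaskOutput produced by the task."""
    if "IMPORTANT" not in result.raw:
        # On failure, the second element is fed back into the retry context.
        return (False, "Output must contain the keyword 'IMPORTANT'")
    # On success, the second element becomes the (possibly transformed) final output.
    return (True, result.raw)


task = Task(
    description="Write a short response.",
    expected_output="A response containing the keyword 'IMPORTANT'",
    guardrail=require_keyword,
    max_retries=2,
)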

View File

@@ -578,14 +578,6 @@ def test_multiple_docling_sources():
assert docling_source.content is not None
def test_docling_source_with_local_file():
current_dir = Path(__file__).parent
pdf_path = current_dir / "crewai_quickstart.pdf"
docling_source = CrewDoclingSource(file_paths=[pdf_path])
assert docling_source.file_paths == [pdf_path]
assert docling_source.content is not None
def test_file_path_validation():
"""Test file path validation for knowledge sources."""
current_dir = Path(__file__).parent
@@ -606,6 +598,6 @@ def test_file_path_validation():
# Test neither file_path nor file_paths provided
with pytest.raises(
ValueError,
match="file_path/file_paths must be a Path, str, or a list of these types"
match="file_path/file_paths must be a Path, str, or a list of these types",
):
PDFKnowledgeSource()

View File

@@ -27,7 +27,7 @@ class SimpleCrew:
@CrewBase
class TestCrew:
class InternalCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@@ -84,7 +84,7 @@ def test_task_memoization():
def test_crew_memoization():
crew = TestCrew()
crew = InternalCrew()
first_call_result = crew.crew()
second_call_result = crew.crew()
@@ -107,7 +107,7 @@ def test_task_name():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_before_kickoff_modification():
crew = TestCrew()
crew = InternalCrew()
inputs = {"topic": "LLMs"}
result = crew.crew().kickoff(inputs=inputs)
assert "bicycles" in result.raw, "Before kickoff function did not modify inputs"
@@ -115,7 +115,7 @@ def test_before_kickoff_modification():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_after_kickoff_modification():
crew = TestCrew()
crew = InternalCrew()
# Assuming the crew execution returns a dict
result = crew.crew().kickoff({"topic": "LLMs"})
@@ -126,7 +126,7 @@ def test_after_kickoff_modification():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_before_kickoff_with_none_input():
crew = TestCrew()
crew = InternalCrew()
crew.crew().kickoff(None)
# Test should pass without raising exceptions

View File

@@ -719,7 +719,7 @@ def test_interpolate_inputs():
task = Task(
description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 interesting ideas about {topic}.",
output_file="/tmp/{topic}/output_{date}.txt"
output_file="/tmp/{topic}/output_{date}.txt",
)
task.interpolate_inputs(inputs={"topic": "AI", "date": "2024"})
@@ -742,41 +742,35 @@ def test_interpolate_inputs():
def test_interpolate_only():
"""Test the interpolate_only method for various scenarios including JSON structure preservation."""
task = Task(
description="Unused in this test",
expected_output="Unused in this test"
description="Unused in this test", expected_output="Unused in this test"
)
# Test JSON structure preservation
json_string = '{"info": "Look at {placeholder}", "nested": {"val": "{nestedVal}"}}'
result = task.interpolate_only(
input_string=json_string,
inputs={"placeholder": "the data", "nestedVal": "something else"}
inputs={"placeholder": "the data", "nestedVal": "something else"},
)
assert '"info": "Look at the data"' in result
assert '"val": "something else"' in result
assert "{placeholder}" not in result
assert "{nestedVal}" not in result
# Test normal string interpolation
normal_string = "Hello {name}, welcome to {place}!"
result = task.interpolate_only(
input_string=normal_string,
inputs={"name": "John", "place": "CrewAI"}
input_string=normal_string, inputs={"name": "John", "place": "CrewAI"}
)
assert result == "Hello John, welcome to CrewAI!"
# Test empty string
result = task.interpolate_only(
input_string="",
inputs={"unused": "value"}
)
result = task.interpolate_only(input_string="", inputs={"unused": "value"})
assert result == ""
# Test string with no placeholders
no_placeholders = "Hello, this is a test"
result = task.interpolate_only(
input_string=no_placeholders,
inputs={"unused": "value"}
input_string=no_placeholders, inputs={"unused": "value"}
)
assert result == no_placeholders
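Editor's note (not part of the diff): the assertions above describe interpolate_only's semantics — only placeholders whose keys appear in `inputs` are substituted, so unrelated brace pairs such as JSON structure survive untouched. A rough behavioural sketch, not the library's implementation:
def interpolate_only_sketch(input_string: str, inputs: dict) -> str:
    """Replace only known {placeholder} keys; leave every other brace pair intact."""
    result = input_string
    for key, value in inputs.items():
        result = result.replace("{" + key + "}", str(value))
    return result


assert interpolate_only_sketch(
    '{"info": "Look at {placeholder}"}', {"placeholder": "the data"}
) == '{"info": "Look at the data"}'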
@@ -880,56 +874,91 @@ def test_key():
def test_output_file_validation():
"""Test output file path validation."""
# Valid paths
assert Task(
description="Test task",
expected_output="Test output",
output_file="output.txt"
).output_file == "output.txt"
assert Task(
description="Test task",
expected_output="Test output",
output_file="/tmp/output.txt"
).output_file == "tmp/output.txt"
assert Task(
description="Test task",
expected_output="Test output",
output_file="{dir}/output_{date}.txt"
).output_file == "{dir}/output_{date}.txt"
assert (
Task(
description="Test task",
expected_output="Test output",
output_file="output.txt",
).output_file
== "output.txt"
)
assert (
Task(
description="Test task",
expected_output="Test output",
output_file="/tmp/output.txt",
).output_file
== "tmp/output.txt"
)
assert (
Task(
description="Test task",
expected_output="Test output",
output_file="{dir}/output_{date}.txt",
).output_file
== "{dir}/output_{date}.txt"
)
# Invalid paths
with pytest.raises(ValueError, match="Path traversal"):
Task(
description="Test task",
expected_output="Test output",
output_file="../output.txt"
output_file="../output.txt",
)
with pytest.raises(ValueError, match="Path traversal"):
Task(
description="Test task",
expected_output="Test output",
output_file="folder/../output.txt"
output_file="folder/../output.txt",
)
with pytest.raises(ValueError, match="Shell special characters"):
Task(
description="Test task",
expected_output="Test output",
output_file="output.txt | rm -rf /"
output_file="output.txt | rm -rf /",
)
with pytest.raises(ValueError, match="Shell expansion"):
Task(
description="Test task",
expected_output="Test output",
output_file="~/output.txt"
output_file="~/output.txt",
)
with pytest.raises(ValueError, match="Shell expansion"):
Task(
description="Test task",
expected_output="Test output",
output_file="$HOME/output.txt"
output_file="$HOME/output.txt",
)
with pytest.raises(ValueError, match="Invalid template variable"):
Task(
description="Test task",
expected_output="Test output",
output_file="{invalid-name}/output.txt"
output_file="{invalid-name}/output.txt",
)
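Editor's note (not part of the diff): the accepted and rejected paths above imply four checks — path traversal ("..") , shell special characters, shell expansion ("~", "$"), and template variables that are not valid identifiers — plus stripping a leading slash. A hedged sketch of such a validator; the function name and exact rules of the real validator may differ, only the error wording mirrors the match= patterns:
import re


def validate_output_file_sketch(path: str) -> str:
    """Illustrative checks mirroring the failures asserted above."""
    if ".." in path.split("/"):
        raise ValueError("Path traversal detected")
    if any(ch in path for ch in "|&;><`"):
        raise ValueError("Shell special characters detected")
    if path.startswith("~") or "$" in path:
        raise ValueError("Shell expansion detected")
    for var in re.findall(r"\{(.*?)\}", path):
        if not var.isidentifier():
            raise ValueError(f"Invalid template variable: {var}")
    # Leading slashes are dropped, matching "/tmp/output.txt" -> "tmp/output.txt".
    return path.lstrip("/")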
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_execution_times():
researcher = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
allow_delegation=False,
)
task = Task(
description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
expected_output="Bullet point list of 5 interesting ideas.",
agent=researcher,
)
assert task.start_time is None
assert task.end_time is None
assert task.execution_duration is None
task.execute_sync(agent=researcher)
assert task.start_time is not None
assert task.end_time is not None
assert task.execution_duration == (task.end_time - task.start_time).total_seconds()

View File

@@ -6,50 +6,51 @@ from crewai import Agent, Task
from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
class TestAgentTool(BaseAgentTool):
class InternalAgentTool(BaseAgentTool):
"""Concrete implementation of BaseAgentTool for testing."""
def _run(self, *args, **kwargs):
"""Implement required _run method."""
return "Test response"
@pytest.mark.parametrize("role_name,should_match", [
('Futel Official Infopoint', True), # exact match
(' "Futel Official Infopoint" ', True), # extra quotes and spaces
('Futel Official Infopoint\n', True), # trailing newline
('"Futel Official Infopoint"', True), # embedded quotes
(' FUTEL\nOFFICIAL INFOPOINT ', True), # multiple whitespace and newline
('futel official infopoint', True), # lowercase
('FUTEL OFFICIAL INFOPOINT', True), # uppercase
('Non Existent Agent', False), # non-existent agent
(None, False), # None agent name
])
@pytest.mark.parametrize(
"role_name,should_match",
[
("Futel Official Infopoint", True), # exact match
(' "Futel Official Infopoint" ', True), # extra quotes and spaces
("Futel Official Infopoint\n", True), # trailing newline
('"Futel Official Infopoint"', True), # embedded quotes
(" FUTEL\nOFFICIAL INFOPOINT ", True), # multiple whitespace and newline
("futel official infopoint", True), # lowercase
("FUTEL OFFICIAL INFOPOINT", True), # uppercase
("Non Existent Agent", False), # non-existent agent
(None, False), # None agent name
],
)
def test_agent_tool_role_matching(role_name, should_match):
"""Test that agent tools can match roles regardless of case, whitespace, and special characters."""
# Create test agent
test_agent = Agent(
role='Futel Official Infopoint',
goal='Answer questions about Futel',
backstory='Futel Football Club info',
allow_delegation=False
role="Futel Official Infopoint",
goal="Answer questions about Futel",
backstory="Futel Football Club info",
allow_delegation=False,
)
# Create test agent tool
agent_tool = TestAgentTool(
name="test_tool",
description="Test tool",
agents=[test_agent]
agent_tool = InternalAgentTool(
name="test_tool", description="Test tool", agents=[test_agent]
)
# Test role matching
result = agent_tool._execute(
agent_name=role_name,
task='Test task',
context=None
)
result = agent_tool._execute(agent_name=role_name, task="Test task", context=None)
if should_match:
assert "coworker mentioned not found" not in result.lower(), \
f"Should find agent with role name: {role_name}"
assert (
"coworker mentioned not found" not in result.lower()
), f"Should find agent with role name: {role_name}"
else:
assert "coworker mentioned not found" in result.lower(), \
f"Should not find agent with role name: {role_name}"
assert (
"coworker mentioned not found" in result.lower()
), f"Should not find agent with role name: {role_name}"

View File

@@ -15,10 +15,7 @@ def test_task_without_guardrail():
agent.execute_task.return_value = "test result"
agent.crew = None
task = Task(
description="Test task",
expected_output="Output"
)
task = Task(description="Test task", expected_output="Output")
result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput)
@@ -27,6 +24,7 @@ def test_task_without_guardrail():
def test_task_with_successful_guardrail():
"""Test that successful guardrail validation passes transformed result."""
def guardrail(result: TaskOutput):
return (True, result.raw.upper())
@@ -35,11 +33,7 @@ def test_task_with_successful_guardrail():
agent.execute_task.return_value = "test result"
agent.crew = None
task = Task(
description="Test task",
expected_output="Output",
guardrail=guardrail
)
task = Task(description="Test task", expected_output="Output", guardrail=guardrail)
result = task.execute_sync(agent=agent)
assert isinstance(result, TaskOutput)
@@ -48,22 +42,20 @@ def test_task_with_successful_guardrail():
def test_task_with_failing_guardrail():
"""Test that failing guardrail triggers retry with error context."""
def guardrail(result: TaskOutput):
return (False, "Invalid format")
agent = Mock()
agent.role = "test_agent"
agent.execute_task.side_effect = [
"bad result",
"good result"
]
agent.execute_task.side_effect = ["bad result", "good result"]
agent.crew = None
task = Task(
description="Test task",
expected_output="Output",
guardrail=guardrail,
max_retries=1
max_retries=1,
)
# First execution fails guardrail, second succeeds
@@ -77,6 +69,7 @@ def test_task_with_failing_guardrail():
def test_task_with_guardrail_retries():
"""Test that guardrail respects max_retries configuration."""
def guardrail(result: TaskOutput):
return (False, "Invalid format")
@@ -89,7 +82,7 @@ def test_task_with_guardrail_retries():
description="Test task",
expected_output="Output",
guardrail=guardrail,
max_retries=2
max_retries=2,
)
with pytest.raises(Exception) as exc_info:
@@ -102,6 +95,7 @@ def test_task_with_guardrail_retries():
def test_guardrail_error_in_context():
"""Test that guardrail error is passed in context for retry."""
def guardrail(result: TaskOutput):
return (False, "Expected JSON, got string")
@@ -113,11 +107,12 @@ def test_guardrail_error_in_context():
description="Test task",
expected_output="Output",
guardrail=guardrail,
max_retries=1
max_retries=1,
)
# Mock execute_task to succeed on second attempt
first_call = True
def execute_task(task, context, tools):
nonlocal first_call
if first_call:

View File

@@ -15,7 +15,7 @@ def test_creating_a_tool_using_annotation():
my_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert my_tool.args_schema.schema()["properties"] == {
assert my_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert (
@@ -29,7 +29,7 @@ def test_creating_a_tool_using_annotation():
converted_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert converted_tool.args_schema.schema()["properties"] == {
assert converted_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert (
@@ -54,7 +54,7 @@ def test_creating_a_tool_using_baseclass():
my_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert my_tool.args_schema.schema()["properties"] == {
assert my_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?"
@@ -66,7 +66,7 @@ def test_creating_a_tool_using_baseclass():
converted_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, your agent will need this information to use it."
)
assert converted_tool.args_schema.schema()["properties"] == {
assert converted_tool.args_schema.model_json_schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert (
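Editor's note (not part of the diff): these hunks track the Pydantic v2 rename — BaseModel.schema() is deprecated in favour of model_json_schema(), which returns the same JSON-schema dict. A standalone illustration:
from pydantic import BaseModel


class Question(BaseModel):
    question: str


# Pydantic v2: preferred call; .schema() still works but raises a deprecation warning.
assert Question.model_json_schema()["properties"] == {
    "question": {"title": "Question", "type": "string"}
}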

View File

@@ -25,7 +25,7 @@ def schema_class():
return TestSchema
class TestCrewStructuredTool:
class InternalCrewStructuredTool:
def test_initialization(self, basic_function, schema_class):
"""Test basic initialization of CrewStructuredTool"""
tool = CrewStructuredTool(

View File

@@ -12,7 +12,7 @@ from crewai.utilities.evaluators.crew_evaluator_handler import (
)
class TestCrewEvaluator:
class InternalCrewEvaluator:
@pytest.fixture
def crew_planner(self):
agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")

View File

@@ -16,7 +16,7 @@ from crewai.utilities.planning_handler import (
)
class TestCrewPlanner:
class InternalCrewPlanner:
@pytest.fixture
def crew_planner(self):
tasks = [
@@ -115,13 +115,13 @@ class TestCrewPlanner:
def __init__(self, name: str, description: str):
tool_data = {"name": name, "description": description}
super().__init__(**tool_data)
def __str__(self):
return self.name
def __repr__(self):
return self.name
def to_structured_tool(self):
return self
@@ -149,11 +149,11 @@ class TestCrewPlanner:
]
)
)
# Create planner with the new task
planner = CrewPlanner([task], None)
tasks_summary = planner._create_tasks_summary()
# Verify task summary content
assert isinstance(tasks_summary, str)
assert task.description in tasks_summary

View File

@@ -4,7 +4,7 @@ import unittest
from crewai.utilities.training_handler import CrewTrainingHandler
class TestCrewTrainingHandler(unittest.TestCase):
class InternalCrewTrainingHandler(unittest.TestCase):
def setUp(self):
self.handler = CrewTrainingHandler("trained_data.pkl")