mirror of https://github.com/crewAIInc/crewAI.git
Commit: Merge branch 'main' into bug_fix
@@ -1621,6 +1621,38 @@ def test_agent_with_knowledge_sources():
    assert "red" in result.raw.lower()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_extensive_role():
    content = "Brandon's favorite color is red and he likes Mexican food."
    string_source = StringKnowledgeSource(content=content)

    with patch(
        "crewai.knowledge.storage.knowledge_storage.KnowledgeStorage"
    ) as MockKnowledge:
        mock_knowledge_instance = MockKnowledge.return_value
        mock_knowledge_instance.sources = [string_source]
        mock_knowledge_instance.query.return_value = [{"content": content}]

        agent = Agent(
            role="Information Agent with extensive role description that is longer than 80 characters",
            goal="Provide information based on knowledge sources",
            backstory="You have access to specific knowledge sources.",
            llm=LLM(model="gpt-4o-mini"),
            knowledge_sources=[string_source],
        )

        task = Task(
            description="What is Brandon's favorite color?",
            expected_output="Brandon's favorite color.",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        assert "red" in result.raw.lower()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_knowledge_sources_works_with_copy():
    content = "Brandon's favorite color is red and he likes Mexican food."
File diff suppressed because one or more lines are too long
@@ -710,4 +710,117 @@ interactions:
|
||||
- req_4ceac9bc8ae57f631959b91d2ab63c4d
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test agent
|
||||
backstory\nYour personal goal is: Test agent goal\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent
|
||||
Task: Test task description\n\nThis is the expected criteria for your final
|
||||
answer: Test expected output\nyou MUST return the actual complete content as
|
||||
the final answer, not a summary.\n\nBegin! This is VERY important to you, use
|
||||
the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
|
||||
"model": "gpt-4o", "stop": ["\nObservation:"]}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '840'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.61.0
|
||||
x-stainless-arch:
|
||||
- x64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.61.0
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-BExKOliqPgvHyozZaBu5oN50CHtsa\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1742904348,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"I now can give a great answer \\nFinal
|
||||
Answer: Test expected output\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\":
|
||||
15,\n \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_90d33c15d4\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 925e4749af02f227-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 25 Mar 2025 12:05:48 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=VHa7Z7dJYptxXpaMxgldvK6HqIM.m74xpi.80N_EBDc-1742904348-1.0.1.1-VthD2riCSnAprFYhOZxfIrTjT33tybJHpHWB25Q_Hx4vuACCyF00tix6e6eorDReGcW3jb5cUzbGqYi47TrMsS4LYjxBv5eCo7cU9OuFajs;
|
||||
path=/; expires=Tue, 25-Mar-25 12:35:48 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=Is8fSaH3lU8yHyT3fI7cRZiDqIYSI6sPpzfzvEV8HMc-1742904348760-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '377'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '50000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '49999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999822'
|
||||
x-ratelimit-reset-requests:
|
||||
- 1ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_fd6b93e3b1a30868482c72306e7f63c2
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
|
||||
@@ -0,0 +1,378 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: !!binary |
|
||||
CpIKCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS6QkKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRLBBwoQ08SlQ6w2FsCauTgZCqberRIITfOsgNi1qJkqDENyZXcgQ3JlYXRlZDABOdjG
|
||||
6D/PcDAYQahPEkDPcDAYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTA4LjBKGgoOcHl0aG9uX3Zl
|
||||
cnNpb24SCAoGMy4xMi45Si4KCGNyZXdfa2V5EiIKIDkwNzMxMTU4MzVlMWNhZjJhNmUxNTIyZDA1
|
||||
YTBiNTFkSjEKB2NyZXdfaWQSJgokMzdjOGM4NzgtN2NmZC00YjEyLWE4YzctYzIyZDZlOTIxODBk
|
||||
ShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3
|
||||
X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrgAgoLY3Jl
|
||||
d19hZ2VudHMS0AIKzQJbeyJrZXkiOiAiNzYyM2ZjNGY3ZDk0Y2YzZmRiZmNjMjlmYjBiMDIyYmIi
|
||||
LCAiaWQiOiAiYmVjMjljMTAtOTljYi00MzQwLWIwYTItMWU1NTVkNGRmZGM0IiwgInJvbGUiOiAi
|
||||
VmlzdWFsIFF1YWxpdHkgSW5zcGVjdG9yIiwgInZlcmJvc2U/IjogdHJ1ZSwgIm1heF9pdGVyIjog
|
||||
MjUsICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0iOiAi
|
||||
b3BlbmFpL2dwdC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2Rl
|
||||
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6
|
||||
IFtdfV1KjQIKCmNyZXdfdGFza3MS/gEK+wFbeyJrZXkiOiAiMDExM2E5ZTg0N2M2NjI2ZDY0ZDZk
|
||||
Yzk4M2IwNDA5MTgiLCAiaWQiOiAiZWQzYmY1YWUtZTBjMS00MjIxLWFhYTgtMThlNjVkYTMyZjc1
|
||||
IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdl
|
||||
bnRfcm9sZSI6ICJWaXN1YWwgUXVhbGl0eSBJbnNwZWN0b3IiLCAiYWdlbnRfa2V5IjogIjc2MjNm
|
||||
YzRmN2Q5NGNmM2ZkYmZjYzI5ZmIwYjAyMmJiIiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQAB
|
||||
AAASjgIKECo77ESam8oLrZMmgLLaoksSCLE6x14/Kb1vKgxUYXNrIENyZWF0ZWQwATlI/chAz3Aw
|
||||
GEEAgMpAz3AwGEouCghjcmV3X2tleRIiCiA5MDczMTE1ODM1ZTFjYWYyYTZlMTUyMmQwNWEwYjUx
|
||||
ZEoxCgdjcmV3X2lkEiYKJDM3YzhjODc4LTdjZmQtNGIxMi1hOGM3LWMyMmQ2ZTkyMTgwZEouCgh0
|
||||
YXNrX2tleRIiCiAwMTEzYTllODQ3YzY2MjZkNjRkNmRjOTgzYjA0MDkxOEoxCgd0YXNrX2lkEiYK
|
||||
JGVkM2JmNWFlLWUwYzEtNDIyMS1hYWE4LTE4ZTY1ZGEzMmY3NXoCGAGFAQABAAA=
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '1301'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.31.1
|
||||
method: POST
|
||||
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
Date:
|
||||
- Wed, 26 Mar 2025 19:24:52 GMT
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Visual Quality Inspector.
|
||||
Senior quality control expert with expertise in visual inspection\nYour personal
|
||||
goal is: Perform detailed quality analysis of product images\nYou ONLY have
|
||||
access to the following tools, and should NEVER make up tools that are not listed
|
||||
here:\n\nTool Name: Add image to content\nTool Arguments: {''image_url'': {''description'':
|
||||
''The URL or path of the image to add'', ''type'': ''str''}, ''action'': {''description'':
|
||||
''Optional context or question about the image'', ''type'': ''Union[str, NoneType]''}}\nTool
|
||||
Description: See image to understand its content, you can optionally ask a question
|
||||
about the image\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [Add image to content], just the name, exactly as it''s written.\nAction
|
||||
Input: the input to the action, just a simple JSON object, enclosed in curly
|
||||
braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n```"}, {"role": "user", "content": "\nCurrent Task: \n Analyze
|
||||
the product image at https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244
|
||||
with focus on:\n 1. Quality of materials\n 2. Manufacturing defects\n 3.
|
||||
Compliance with standards\n Provide a detailed report highlighting any
|
||||
issues found.\n \n\nThis is the expected criteria for your final answer:
|
||||
A detailed report highlighting any issues found\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
|
||||
"temperature": 0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2033'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
x-stainless-arch:
|
||||
- x64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-BFQepLwSYYzdKLylSFsgcJeg6GTqS\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1743017091,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Thought: I need to examine the product
|
||||
image to assess the quality of materials, look for any manufacturing defects,
|
||||
and check compliance with standards.\\n\\nAction: Add image to content\\nAction
|
||||
Input: {\\\"image_url\\\": \\\"https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244\\\",
|
||||
\\\"action\\\": \\\"Analyze the quality of materials, manufacturing defects,
|
||||
and compliance with standards.\\\"}\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 413,\n \"completion_tokens\":
|
||||
101,\n \"total_tokens\": 514,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_7e8d90e604\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 926907d79dcff1e7-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 26 Mar 2025 19:24:53 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=WK433.4kW8cr9rwvOlk4EZ2SfRYK9lAPwXCBYEvLcmU-1743017093-1.0.1.1-kVZyUew5rUbMk.2koGJF_rmX.fTseqN241n2M40n8KvBGoKgy6KM6xBmvFbIVWxUs2Y5ZAz8mWy9CrGjaNKSfCzxmv4.pq78z_DGHr37PgI;
|
||||
path=/; expires=Wed, 26-Mar-25 19:54:53 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=T77PMcuNYeyzK0tQyDOe7EScjVBVzW_7DpD3YQBqmUc-1743017093675-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1729'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '50000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '49999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999534'
|
||||
x-ratelimit-reset-requests:
|
||||
- 1ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_2399c3355adf16734907c73611a7d330
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
- request:
|
||||
body: !!binary |
|
||||
CtgBCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSrwEKEgoQY3Jld2FpLnRl
|
||||
bGVtZXRyeRKYAQoQp2ACB2xRGve4HGtU2RdWCBIIlQcsbhK22ykqClRvb2wgVXNhZ2UwATlACEXG
|
||||
z3AwGEHAjGPGz3AwGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwOC4wSiMKCXRvb2xfbmFtZRIW
|
||||
ChRBZGQgaW1hZ2UgdG8gY29udGVudEoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAA
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '219'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
User-Agent:
|
||||
- OTel-OTLP-Exporter-Python/1.31.1
|
||||
method: POST
|
||||
uri: https://telemetry.crewai.com:4319/v1/traces
|
||||
response:
|
||||
body:
|
||||
string: "\n\0"
|
||||
headers:
|
||||
Content-Length:
|
||||
- '2'
|
||||
Content-Type:
|
||||
- application/x-protobuf
|
||||
Date:
|
||||
- Wed, 26 Mar 2025 19:24:57 GMT
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages": [{"role": "system", "content": "You are Visual Quality Inspector.
|
||||
Senior quality control expert with expertise in visual inspection\nYour personal
|
||||
goal is: Perform detailed quality analysis of product images\nYou ONLY have
|
||||
access to the following tools, and should NEVER make up tools that are not listed
|
||||
here:\n\nTool Name: Add image to content\nTool Arguments: {''image_url'': {''description'':
|
||||
''The URL or path of the image to add'', ''type'': ''str''}, ''action'': {''description'':
|
||||
''Optional context or question about the image'', ''type'': ''Union[str, NoneType]''}}\nTool
|
||||
Description: See image to understand its content, you can optionally ask a question
|
||||
about the image\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
|
||||
you should always think about what to do\nAction: the action to take, only one
|
||||
name of [Add image to content], just the name, exactly as it''s written.\nAction
|
||||
Input: the input to the action, just a simple JSON object, enclosed in curly
|
||||
braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
|
||||
all necessary information is gathered, return the following format:\n\n```\nThought:
|
||||
I now know the final answer\nFinal Answer: the final answer to the original
|
||||
input question\n```"}, {"role": "user", "content": "\nCurrent Task: \n Analyze
|
||||
the product image at https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244
|
||||
with focus on:\n 1. Quality of materials\n 2. Manufacturing defects\n 3.
|
||||
Compliance with standards\n Provide a detailed report highlighting any
|
||||
issues found.\n \n\nThis is the expected criteria for your final answer:
|
||||
A detailed report highlighting any issues found\nyou MUST return the actual
|
||||
complete content as the final answer, not a summary.\n\nBegin! This is VERY
|
||||
important to you, use the tools available and give your best Final Answer, your
|
||||
job depends on it!\n\nThought:"}, {"role": "user", "content": [{"type": "text",
|
||||
"text": "Analyze the quality of materials, manufacturing defects, and compliance
|
||||
with standards."}, {"type": "image_url", "image_url": {"url": "https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244"}}]},
|
||||
{"role": "assistant", "content": "Thought: I need to examine the product image
|
||||
to assess the quality of materials, look for any manufacturing defects, and
|
||||
check compliance with standards.\n\nAction: Add image to content\nAction Input:
|
||||
{\"image_url\": \"https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244\",
|
||||
\"action\": \"Analyze the quality of materials, manufacturing defects, and compliance
|
||||
with standards.\"}"}], "model": "gpt-4o", "stop": ["\nObservation:"], "temperature":
|
||||
0.7}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '2797'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=WK433.4kW8cr9rwvOlk4EZ2SfRYK9lAPwXCBYEvLcmU-1743017093-1.0.1.1-kVZyUew5rUbMk.2koGJF_rmX.fTseqN241n2M40n8KvBGoKgy6KM6xBmvFbIVWxUs2Y5ZAz8mWy9CrGjaNKSfCzxmv4.pq78z_DGHr37PgI;
|
||||
_cfuvid=T77PMcuNYeyzK0tQyDOe7EScjVBVzW_7DpD3YQBqmUc-1743017093675-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.68.2
|
||||
x-stainless-arch:
|
||||
- x64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.68.2
|
||||
x-stainless-raw-response:
|
||||
- 'true'
|
||||
x-stainless-read-timeout:
|
||||
- '600.0'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.12.9
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
content: "{\n \"id\": \"chatcmpl-BFQetNNvmPgPxhzaKiHYsPqm8aN0i\",\n \"object\":
|
||||
\"chat.completion\",\n \"created\": 1743017095,\n \"model\": \"gpt-4o-2024-08-06\",\n
|
||||
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
|
||||
\"assistant\",\n \"content\": \"Observation: The image displays a black
|
||||
leather boot with a pointed toe and a low heel. \\n\\nQuality of Materials:\\n1.
|
||||
The leather appears to be of good quality, displaying a consistent texture and
|
||||
finish, which suggests durability.\\n2. The material has a slight sheen, indicating
|
||||
a possible finishing treatment that enhances the appearance and may offer some
|
||||
protection.\\n\\nManufacturing Defects:\\n1. There are no visible stitching
|
||||
errors; the seams appear straight and clean.\\n2. No apparent glue marks or
|
||||
uneven edges, which indicates good craftsmanship.\\n3. There is a slight distressed
|
||||
effect, but it appears intentional as part of the design rather than a defect.\\n\\nCompliance
|
||||
with Standards:\\n1. The shoe design seems to comply with typical fashion standards,
|
||||
showing a balance of aesthetics and functionality.\\n2. The heel height and
|
||||
shape appear to provide stability, aligning with safety standards for footwear.\\n\\nFinal
|
||||
Answer: The analysis of the product image reveals that the black leather boot
|
||||
is made of high-quality materials with no visible manufacturing defects. The
|
||||
craftsmanship is precise, with clean seams and a well-executed design. The distressed
|
||||
effect appears intentional and part of the aesthetic. The boot seems to comply
|
||||
with fashion and safety standards, offering both style and functionality. No
|
||||
significant issues were found.\",\n \"refusal\": null,\n \"annotations\":
|
||||
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
|
||||
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1300,\n \"completion_tokens\":
|
||||
250,\n \"total_tokens\": 1550,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
|
||||
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
|
||||
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
|
||||
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
|
||||
\"default\",\n \"system_fingerprint\": \"fp_3a5b33c01a\"\n}\n"
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 926907e45f33f1e7-GRU
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Wed, 26 Mar 2025 19:25:01 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '7242'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
x-ratelimit-limit-input-images:
|
||||
- '250000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '50000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-input-images:
|
||||
- '249999'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '49999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149998641'
|
||||
x-ratelimit-reset-input-images:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 1ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_c5dd144c8ac1bb3bd96ffbba40707b2d
|
||||
http_version: HTTP/1.1
|
||||
status_code: 200
|
||||
version: 1
|
||||
@@ -11,7 +11,9 @@ import pydantic_core
import pytest

from crewai.agent import Agent
from crewai.agents import CacheHandler
from crewai.agents.cache import CacheHandler
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
@@ -3731,6 +3733,44 @@ def test_multimodal_agent_image_tool_handling():
    assert result["content"][1]["type"] == "image_url"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_describing_image_successfully():
    """
    Test that a multimodal agent can process images without validation errors.
    This test reproduces the scenario from issue #2475.
    """
    llm = LLM(model="openai/gpt-4o", temperature=0.7)  # model with vision capabilities

    expert_analyst = Agent(
        role="Visual Quality Inspector",
        goal="Perform detailed quality analysis of product images",
        backstory="Senior quality control expert with expertise in visual inspection",
        llm=llm,
        verbose=True,
        allow_delegation=False,
        multimodal=True,
    )

    inspection_task = Task(
        description="""
        Analyze the product image at https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244 with focus on:
        1. Quality of materials
        2. Manufacturing defects
        3. Compliance with standards
        Provide a detailed report highlighting any issues found.
        """,
        expected_output="A detailed report highlighting any issues found",
        agent=expert_analyst,
    )

    crew = Crew(agents=[expert_analyst], tasks=[inspection_task])
    result = crew.kickoff()

    task_output = result.tasks_output[0]
    assert isinstance(task_output, TaskOutput)
    assert task_output.raw == result.raw


@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_live_image_analysis():
    """
@@ -4025,3 +4065,52 @@ def test_crew_with_knowledge_sources_works_with_copy():
    assert len(crew_copy.tasks) == len(crew.tasks)

    assert len(crew_copy.tasks) == len(crew.tasks)


def test_crew_kickoff_for_each_works_with_manager_agent_copy():
    researcher = Agent(
        role="Researcher",
        goal="Conduct thorough research and analysis on AI and AI agents",
        backstory="You're an expert researcher, specialized in technology, software engineering, AI, and startups. You work as a freelancer and are currently researching for a new client.",
        allow_delegation=False
    )

    writer = Agent(
        role="Senior Writer",
        goal="Create compelling content about AI and AI agents",
        backstory="You're a senior writer, specialized in technology, software engineering, AI, and startups. You work as a freelancer and are currently writing content for a new client.",
        allow_delegation=False
    )

    # Define task
    task = Task(
        description="Generate a list of 5 interesting ideas for an article, then write one captivating paragraph for each idea that showcases the potential of a full article on this topic. Return the list of ideas with their paragraphs and your notes.",
        expected_output="5 bullet points, each with a paragraph and accompanying notes.",
    )

    # Define manager agent
    manager = Agent(
        role="Project Manager",
        goal="Efficiently manage the crew and ensure high-quality task completion",
        backstory="You're an experienced project manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
        allow_delegation=True
    )

    # Instantiate crew with a custom manager
    crew = Crew(
        agents=[researcher, writer],
        tasks=[task],
        manager_agent=manager,
        process=Process.hierarchical,
        verbose=True
    )

    crew_copy = crew.copy()
    assert crew_copy.manager_agent is not None
    assert crew_copy.manager_agent.id != crew.manager_agent.id
    assert crew_copy.manager_agent.role == crew.manager_agent.role
    assert crew_copy.manager_agent.goal == crew.manager_agent.goal
    assert crew_copy.manager_agent.backstory == crew.manager_agent.backstory
    assert isinstance(crew_copy.manager_agent.agent_executor, CrewAgentExecutor)
    assert isinstance(crew_copy.manager_agent.cache_handler, CacheHandler)
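The assertions above only exercise copy(); for reference, a minimal kickoff_for_each sketch (illustrative and not part of this diff: it reuses the crew built in the test above, assumes the documented Crew.kickoff_for_each(inputs=[...]) signature, and the input dicts are hypothetical values):

    # Illustrative sketch: kickoff_for_each copies the crew, manager agent included,
    # once per input dict, which is the behavior the copy assertions above guard.
    inputs = [{"topic": "AI agents"}, {"topic": "AI in healthcare"}]  # hypothetical interpolation values
    results = crew.kickoff_for_each(inputs=inputs)
    assert len(results) == len(inputs)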
tests/memory/user_memory_test.py (new file, 68 lines)
@@ -0,0 +1,68 @@
from unittest.mock import MagicMock, patch

import pytest
from mem0.memory.main import Memory

from crewai.memory.user.user_memory import UserMemory
from crewai.memory.user.user_memory_item import UserMemoryItem


class MockCrew:
    def __init__(self, memory_config):
        self.memory_config = memory_config


@pytest.fixture
def user_memory():
    """Fixture to create a UserMemory instance"""
    crew = MockCrew(
        memory_config={
            "provider": "mem0",
            "config": {"user_id": "john"},
            "user_memory": {},
        }
    )

    user_memory = MagicMock(spec=UserMemory)

    with patch.object(Memory, "__new__", return_value=user_memory):
        user_memory_instance = UserMemory(crew=crew)

    return user_memory_instance


def test_save_and_search(user_memory):
    memory = UserMemoryItem(
        data="""test value test value test value test value test value test value
        test value test value test value test value test value test value
        test value test value test value test value test value test value""",
        user="test_user",
        metadata={"task": "test_task"},
    )

    with patch.object(UserMemory, "save") as mock_save:
        user_memory.save(
            value=memory.data,
            metadata=memory.metadata,
            user=memory.user
        )

        mock_save.assert_called_once_with(
            value=memory.data,
            metadata=memory.metadata,
            user=memory.user
        )

    expected_result = [
        {
            "context": memory.data,
            "metadata": {"agent": "test_agent"},
            "score": 0.95,
        }
    ]
    expected_result = ["mocked_result"]

    # Use patch.object to mock UserMemory's search method
    with patch.object(UserMemory, "search", return_value=expected_result) as mock_search:
        find = UserMemory.search("test value", score_threshold=0.01)[0]
        mock_search.assert_called_once_with("test value", score_threshold=0.01)
        assert find == expected_result[0]
tests/storage/test_mem0_storage.py (new file, 114 lines)
@@ -0,0 +1,114 @@
import os
from unittest.mock import MagicMock, patch

import pytest
from mem0.client.main import MemoryClient
from mem0.memory.main import Memory

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.memory.storage.mem0_storage import Mem0Storage
from crewai.task import Task


# Define the class (if not already defined)
class MockCrew:
    def __init__(self, memory_config):
        self.memory_config = memory_config


@pytest.fixture
def mock_mem0_memory():
    """Fixture to create a mock Memory instance"""
    mock_memory = MagicMock(spec=Memory)
    return mock_memory


@pytest.fixture
def mem0_storage_with_mocked_config(mock_mem0_memory):
    """Fixture to create a Mem0Storage instance with mocked dependencies"""

    # Patch the Memory class to return our mock
    with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory):
        config = {
            "vector_store": {
                "provider": "mock_vector_store",
                "config": {
                    "host": "localhost",
                    "port": 6333
                }
            },
            "llm": {
                "provider": "mock_llm",
                "config": {
                    "api_key": "mock-api-key",
                    "model": "mock-model"
                }
            },
            "embedder": {
                "provider": "mock_embedder",
                "config": {
                    "api_key": "mock-api-key",
                    "model": "mock-model"
                }
            },
            "graph_store": {
                "provider": "mock_graph_store",
                "config": {
                    "url": "mock-url",
                    "username": "mock-user",
                    "password": "mock-password"
                }
            },
            "history_db_path": "/mock/path",
            "version": "test-version",
            "custom_fact_extraction_prompt": "mock prompt 1",
            "custom_update_memory_prompt": "mock prompt 2"
        }

        # Instantiate the class with memory_config
        crew = MockCrew(
            memory_config={
                "provider": "mem0",
                "config": {"user_id": "test_user", "local_mem0_config": config},
            }
        )

        mem0_storage = Mem0Storage(type="short_term", crew=crew)
        return mem0_storage


def test_mem0_storage_initialization(mem0_storage_with_mocked_config, mock_mem0_memory):
    """Test that Mem0Storage initializes correctly with the mocked config"""
    assert mem0_storage_with_mocked_config.memory_type == "short_term"
    assert mem0_storage_with_mocked_config.memory is mock_mem0_memory


@pytest.fixture
def mock_mem0_memory_client():
    """Fixture to create a mock MemoryClient instance"""
    mock_memory = MagicMock(spec=MemoryClient)
    return mock_memory


@pytest.fixture
def mem0_storage_with_memory_client(mock_mem0_memory_client):
    """Fixture to create a Mem0Storage instance with mocked dependencies"""

    # We need to patch the MemoryClient before it's instantiated
    with patch.object(MemoryClient, "__new__", return_value=mock_mem0_memory_client):
        crew = MockCrew(
            memory_config={
                "provider": "mem0",
                "config": {"user_id": "test_user", "api_key": "ABCDEFGH", "org_id": "my_org_id", "project_id": "my_project_id"},
            }
        )

        mem0_storage = Mem0Storage(type="short_term", crew=crew)
        return mem0_storage


def test_mem0_storage_with_memory_client_initialization(mem0_storage_with_memory_client, mock_mem0_memory_client):
    """Test Mem0Storage initialization with MemoryClient"""
    assert mem0_storage_with_memory_client.memory_type == "short_term"
    assert mem0_storage_with_memory_client.memory is mock_mem0_memory_client
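For orientation, a minimal sketch of how the memory_config shape exercised by these fixtures could be wired into a real crew (illustrative only; it assumes Crew accepts memory=True plus a memory_config dict with the same provider/config keys the MockCrew fixtures construct, and the agent/task values are hypothetical):

    # Illustrative sketch, not part of the diff: a crew configured with mem0-backed
    # memory using the memory_config keys exercised by the fixtures above.
    from crewai import Agent, Crew, Task

    agent = Agent(
        role="Researcher",
        goal="Answer questions using stored user context",
        backstory="Keeps track of what the user has said before.",
    )
    task = Task(
        description="Summarize what is known about the user.",
        expected_output="A short summary.",
        agent=agent,
    )
    crew = Crew(
        agents=[agent],
        tasks=[task],
        memory=True,
        memory_config={
            "provider": "mem0",
            "config": {"user_id": "john"},  # same keys the tests pass through to Mem0Storage
        },
    )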
@@ -787,6 +787,25 @@ def test_conditional_task_definition_based_on_dict():
    assert task.agent is None


def test_conditional_task_copy_preserves_type():
    task_config = {
        "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.",
        "expected_output": "The score of the title.",
    }
    original_task = Task(**task_config)
    copied_task = original_task.copy(agents=[], task_mapping={})
    assert isinstance(copied_task, Task)

    original_conditional_config = {
        "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'. Check examples to base your evaluation on.",
        "expected_output": "The score of the title.",
        "condition": lambda x: True,
    }
    original_conditional_task = ConditionalTask(**original_conditional_config)
    copied_conditional_task = original_conditional_task.copy(agents=[], task_mapping={})
    assert isinstance(copied_conditional_task, ConditionalTask)


def test_interpolate_inputs():
    task = Task(
        description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",
tests/test_multimodal_validation.py (new file, 46 lines)
@@ -0,0 +1,46 @@
import os

import pytest

from crewai import LLM, Agent, Crew, Task


@pytest.mark.skip(reason="Only run manually with valid API keys")
def test_multimodal_agent_with_image_url():
    """
    Test that a multimodal agent can process images without validation errors.
    This test reproduces the scenario from issue #2475.
    """
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
    if not OPENAI_API_KEY:
        pytest.skip("OPENAI_API_KEY environment variable not set")

    llm = LLM(
        model="openai/gpt-4o",  # model with vision capabilities
        api_key=OPENAI_API_KEY,
        temperature=0.7
    )

    expert_analyst = Agent(
        role="Visual Quality Inspector",
        goal="Perform detailed quality analysis of product images",
        backstory="Senior quality control expert with expertise in visual inspection",
        llm=llm,
        verbose=True,
        allow_delegation=False,
        multimodal=True
    )

    inspection_task = Task(
        description="""
        Analyze the product image at https://www.us.maguireshoes.com/collections/spring-25/products/lucena-black-boot with focus on:
        1. Quality of materials
        2. Manufacturing defects
        3. Compliance with standards
        Provide a detailed report highlighting any issues found.
        """,
        expected_output="A detailed report highlighting any issues found",
        agent=expert_analyst
    )

    crew = Crew(agents=[expert_analyst], tasks=[inspection_task])
@@ -1,5 +1,7 @@
import datetime
import json
import random
import time
from unittest.mock import MagicMock, patch

import pytest
@@ -11,6 +13,7 @@ from crewai.tools.tool_usage import ToolUsage
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
    ToolSelectionErrorEvent,
    ToolUsageFinishedEvent,
    ToolValidateInputErrorEvent,
)

@@ -624,3 +627,161 @@ def test_tool_validate_input_error_event():
    assert event.agent_role == "test_role"
    assert event.tool_name == "test_tool"
    assert "must be a valid dictionary" in event.error


def test_tool_usage_finished_event_with_result():
    """Test that ToolUsageFinishedEvent is emitted with correct result attributes."""
    # Create mock agent with proper string values
    mock_agent = MagicMock()
    mock_agent.key = "test_agent_key"
    mock_agent.role = "test_agent_role"
    mock_agent._original_role = "test_agent_role"
    mock_agent.i18n = MagicMock()
    mock_agent.verbose = False

    # Create mock task
    mock_task = MagicMock()
    mock_task.delegations = 0

    # Create mock tool
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool"

        def _run(self, input: dict) -> str:
            return "test result"

    test_tool = TestTool()

    # Create mock tool calling
    mock_tool_calling = MagicMock()
    mock_tool_calling.arguments = {"arg1": "value1"}

    # Create ToolUsage instance
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[test_tool],
        original_tools=[test_tool],
        tools_description="Test Tool Description",
        tools_names="Test Tool",
        task=mock_task,
        function_calling_llm=None,
        agent=mock_agent,
        action=MagicMock(),
    )

    # Track received events
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def event_handler(source, event):
        received_events.append(event)

    # Call on_tool_use_finished with test data
    started_at = time.time()
    result = "test output result"
    tool_usage.on_tool_use_finished(
        tool=test_tool,
        tool_calling=mock_tool_calling,
        from_cache=False,
        started_at=started_at,
        result=result,
    )

    # Verify event was emitted
    assert len(received_events) == 1, "Expected one event to be emitted"
    event = received_events[0]
    assert isinstance(event, ToolUsageFinishedEvent)

    # Verify event attributes
    assert event.agent_key == "test_agent_key"
    assert event.agent_role == "test_agent_role"
    assert event.tool_name == "Test Tool"
    assert event.tool_args == {"arg1": "value1"}
    assert event.tool_class == "TestTool"
    assert event.run_attempts == 1  # Default value from ToolUsage
    assert event.delegations == 0
    assert event.from_cache is False
    assert event.output == "test output result"
    assert isinstance(event.started_at, datetime.datetime)
    assert isinstance(event.finished_at, datetime.datetime)
    assert event.type == "tool_usage_finished"


def test_tool_usage_finished_event_with_cached_result():
    """Test that ToolUsageFinishedEvent is emitted with correct result attributes when using cached result."""
    # Create mock agent with proper string values
    mock_agent = MagicMock()
    mock_agent.key = "test_agent_key"
    mock_agent.role = "test_agent_role"
    mock_agent._original_role = "test_agent_role"
    mock_agent.i18n = MagicMock()
    mock_agent.verbose = False

    # Create mock task
    mock_task = MagicMock()
    mock_task.delegations = 0

    # Create mock tool
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool"

        def _run(self, input: dict) -> str:
            return "test result"

    test_tool = TestTool()

    # Create mock tool calling
    mock_tool_calling = MagicMock()
    mock_tool_calling.arguments = {"arg1": "value1"}

    # Create ToolUsage instance
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[test_tool],
        original_tools=[test_tool],
        tools_description="Test Tool Description",
        tools_names="Test Tool",
        task=mock_task,
        function_calling_llm=None,
        agent=mock_agent,
        action=MagicMock(),
    )

    # Track received events
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def event_handler(source, event):
        received_events.append(event)

    # Call on_tool_use_finished with test data and from_cache=True
    started_at = time.time()
    result = "cached test output result"
    tool_usage.on_tool_use_finished(
        tool=test_tool,
        tool_calling=mock_tool_calling,
        from_cache=True,
        started_at=started_at,
        result=result,
    )

    # Verify event was emitted
    assert len(received_events) == 1, "Expected one event to be emitted"
    event = received_events[0]
    assert isinstance(event, ToolUsageFinishedEvent)

    # Verify event attributes
    assert event.agent_key == "test_agent_key"
    assert event.agent_role == "test_agent_role"
    assert event.tool_name == "Test Tool"
    assert event.tool_args == {"arg1": "value1"}
    assert event.tool_class == "TestTool"
    assert event.run_attempts == 1  # Default value from ToolUsage
    assert event.delegations == 0
    assert event.from_cache is True
    assert event.output == "cached test output result"
    assert isinstance(event.started_at, datetime.datetime)
    assert isinstance(event.finished_at, datetime.datetime)
    assert event.type == "tool_usage_finished"
@@ -1,10 +1,10 @@
from unittest.mock import Mock

from crewai.utilities.events.base_events import CrewEvent
from crewai.utilities.events.base_events import BaseEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus


class TestEvent(CrewEvent):
class TestEvent(BaseEvent):
    pass


@@ -24,7 +24,7 @@ def test_specific_event_handler():
def test_wildcard_event_handler():
    mock_handler = Mock()

    @crewai_event_bus.on(CrewEvent)
    @crewai_event_bus.on(BaseEvent)
    def handler(source, event):
        mock_handler(source, event)
tests/utilities/test_chromadb_utils.py (new file, 81 lines)
@@ -0,0 +1,81 @@
import unittest
from typing import Any, Dict, List, Union

import pytest

from crewai.utilities.chromadb import (
    MAX_COLLECTION_LENGTH,
    MIN_COLLECTION_LENGTH,
    is_ipv4_pattern,
    sanitize_collection_name,
)


class TestChromadbUtils(unittest.TestCase):
    def test_sanitize_collection_name_long_name(self):
        """Test sanitizing a very long collection name."""
        long_name = "This is an extremely long role name that will definitely exceed the ChromaDB collection name limit of 63 characters and cause an error when used as a collection name"
        sanitized = sanitize_collection_name(long_name)
        self.assertLessEqual(len(sanitized), MAX_COLLECTION_LENGTH)
        self.assertTrue(sanitized[0].isalnum())
        self.assertTrue(sanitized[-1].isalnum())
        self.assertTrue(all(c.isalnum() or c in ["_", "-"] for c in sanitized))

    def test_sanitize_collection_name_special_chars(self):
        """Test sanitizing a name with special characters."""
        special_chars = "Agent@123!#$%^&*()"
        sanitized = sanitize_collection_name(special_chars)
        self.assertTrue(sanitized[0].isalnum())
        self.assertTrue(sanitized[-1].isalnum())
        self.assertTrue(all(c.isalnum() or c in ["_", "-"] for c in sanitized))

    def test_sanitize_collection_name_short_name(self):
        """Test sanitizing a very short name."""
        short_name = "A"
        sanitized = sanitize_collection_name(short_name)
        self.assertGreaterEqual(len(sanitized), MIN_COLLECTION_LENGTH)
        self.assertTrue(sanitized[0].isalnum())
        self.assertTrue(sanitized[-1].isalnum())

    def test_sanitize_collection_name_bad_ends(self):
        """Test sanitizing a name with non-alphanumeric start/end."""
        bad_ends = "_Agent_"
        sanitized = sanitize_collection_name(bad_ends)
        self.assertTrue(sanitized[0].isalnum())
        self.assertTrue(sanitized[-1].isalnum())

    def test_sanitize_collection_name_none(self):
        """Test sanitizing a None value."""
        sanitized = sanitize_collection_name(None)
        self.assertEqual(sanitized, "default_collection")

    def test_sanitize_collection_name_ipv4_pattern(self):
        """Test sanitizing an IPv4 address."""
        ipv4 = "192.168.1.1"
        sanitized = sanitize_collection_name(ipv4)
        self.assertTrue(sanitized.startswith("ip_"))
        self.assertTrue(sanitized[0].isalnum())
        self.assertTrue(sanitized[-1].isalnum())
        self.assertTrue(all(c.isalnum() or c in ["_", "-"] for c in sanitized))

    def test_is_ipv4_pattern(self):
        """Test IPv4 pattern detection."""
        self.assertTrue(is_ipv4_pattern("192.168.1.1"))
        self.assertFalse(is_ipv4_pattern("not.an.ip.address"))

    def test_sanitize_collection_name_properties(self):
        """Test that sanitized collection names always meet ChromaDB requirements."""
        test_cases = [
            "A" * 100,  # Very long name
            "_start_with_underscore",
            "end_with_underscore_",
            "contains@special#characters",
            "192.168.1.1",  # IPv4 address
            "a" * 2,  # Too short
        ]
        for test_case in test_cases:
            sanitized = sanitize_collection_name(test_case)
            self.assertGreaterEqual(len(sanitized), MIN_COLLECTION_LENGTH)
            self.assertLessEqual(len(sanitized), MAX_COLLECTION_LENGTH)
            self.assertTrue(sanitized[0].isalnum())
            self.assertTrue(sanitized[-1].isalnum())
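These properties mirror ChromaDB's collection-name rules (3 to 63 characters, alphanumeric at both ends, only alphanumerics, underscores, and hyphens), which is what the extensive-role agent test earlier runs into. A minimal usage sketch, assuming only the names the tests above already import:

    # Illustrative only: feeding an over-long agent role through the sanitizer and
    # checking the same contract the tests assert.
    from crewai.utilities.chromadb import (
        MAX_COLLECTION_LENGTH,
        MIN_COLLECTION_LENGTH,
        sanitize_collection_name,
    )

    role = "Information Agent with extensive role description that is longer than 80 characters"
    collection = sanitize_collection_name(role)
    assert MIN_COLLECTION_LENGTH <= len(collection) <= MAX_COLLECTION_LENGTH
    assert collection[0].isalnum() and collection[-1].isalnum()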
@@ -5,8 +5,7 @@ from unittest.mock import Mock
import pytest
from pydantic import BaseModel

from crewai.flow import Flow
from crewai.flow.state_utils import export_state, to_string
from crewai.utilities.serialization import to_serializable, to_string


class Address(BaseModel):
@@ -23,16 +22,6 @@ class Person(BaseModel):
    skills: List[str]


@pytest.fixture
def mock_flow():
    def create_flow(state):
        flow = Mock(spec=Flow)
        flow._state = state
        return flow

    return create_flow


@pytest.mark.parametrize(
    "test_input,expected",
    [
@@ -47,9 +36,8 @@ def mock_flow():
        ({"nested": [1, [2, 3], {4, 5}]}, {"nested": [1, [2, 3], [4, 5]]}),
    ],
)
def test_basic_serialization(mock_flow, test_input, expected):
    flow = mock_flow(test_input)
    result = export_state(flow)
def test_basic_serialization(test_input, expected):
    result = to_serializable(test_input)
    assert result == expected


@@ -60,9 +48,8 @@ def test_basic_serialization(mock_flow, test_input, expected):
        (datetime(2024, 1, 1, 12, 30), "2024-01-01T12:30:00"),
    ],
)
def test_temporal_serialization(mock_flow, input_date, expected):
    flow = mock_flow({"date": input_date})
    result = export_state(flow)
def test_temporal_serialization(input_date, expected):
    result = to_serializable({"date": input_date})
    assert result["date"] == expected


@@ -75,9 +62,8 @@ def test_temporal_serialization(mock_flow, input_date, expected):
        ("normal", "value", str),
    ],
)
def test_dictionary_key_serialization(mock_flow, key, value, expected_key_type):
    flow = mock_flow({key: value})
    result = export_state(flow)
def test_dictionary_key_serialization(key, value, expected_key_type):
    result = to_serializable({key: value})
    assert len(result) == 1
    result_key = next(iter(result.keys()))
    assert isinstance(result_key, expected_key_type)
@@ -91,14 +77,13 @@ def test_dictionary_key_serialization(mock_flow, key, value, expected_key_type):
        (str.upper, "upper"),
    ],
)
def test_callable_serialization(mock_flow, callable_obj, expected_in_result):
    flow = mock_flow({"func": callable_obj})
    result = export_state(flow)
def test_callable_serialization(callable_obj, expected_in_result):
    result = to_serializable({"func": callable_obj})
    assert isinstance(result["func"], str)
    assert expected_in_result in result["func"].lower()


def test_pydantic_model_serialization(mock_flow):
def test_pydantic_model_serialization():
    address = Address(street="123 Main St", city="Tech City", country="Pythonia")

    person = Person(
@@ -109,23 +94,21 @@ def test_pydantic_model_serialization(mock_flow):
        skills=["Python", "Testing"],
    )

    flow = mock_flow(
        {
            "single_model": address,
            "nested_model": person,
            "model_list": [address, address],
            "model_dict": {"home": address},
        }
    )
    data = {
        "single_model": address,
        "nested_model": person,
        "model_list": [address, address],
        "model_dict": {"home": address},
    }

    result = export_state(flow)
    result = to_serializable(data)
    assert (
        to_string(result)
        == '{"single_model": {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}, "nested_model": {"name": "John Doe", "age": 30, "address": {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}, "birthday": "1994-01-01", "skills": ["Python", "Testing"]}, "model_list": [{"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}, {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}], "model_dict": {"home": {"street": "123 Main St", "city": "Tech City", "country": "Pythonia"}}}'
    )


def test_depth_limit(mock_flow):
def test_depth_limit():
    """Test max depth handling with a deeply nested structure"""

    def create_nested(depth):
@@ -134,8 +117,7 @@ def test_depth_limit(mock_flow):
        return {"next": create_nested(depth - 1)}

    deep_structure = create_nested(10)
    flow = mock_flow(deep_structure)
    result = export_state(flow)
    result = to_serializable(deep_structure)

    assert result == {
        "next": {
@@ -148,3 +130,23 @@ def test_depth_limit(mock_flow):
            }
        }
    }


def test_exclude_keys():
    result = to_serializable({"key1": "value1", "key2": "value2"}, exclude={"key1"})
    assert result == {"key2": "value2"}

    model = Person(
        name="John Doe",
        age=30,
        address=Address(street="123 Main St", city="Tech City", country="Pythonia"),
        birthday=date(1994, 1, 1),
        skills=["Python", "Testing"],
    )
    result = to_serializable(model, exclude={"address"})
    assert result == {
        "name": "John Doe",
        "age": 30,
        "birthday": "1994-01-01",
        "skills": ["Python", "Testing"],
    }