mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-11 00:58:30 +00:00
Merge remote-tracking branch 'origin/main' into devin/1744196072-add-huggingface-provider
@@ -1,112 +0,0 @@
-interactions:
-- request:
-    body: '{"messages": [{"role": "user", "content": "Use the failing tool"}], "model":
-      "gpt-4o-mini", "stop": [], "tools": [{"type": "function", "function": {"name":
-      "failing_tool", "description": "This tool always fails.", "parameters": {"type":
-      "object", "properties": {"param": {"type": "string", "description": "A test
-      parameter"}}, "required": ["param"]}}}]}'
-    headers:
-      accept:
-      - application/json
-      accept-encoding:
-      - gzip, deflate
-      connection:
-      - keep-alive
-      content-length:
-      - '353'
-      content-type:
-      - application/json
-      host:
-      - api.openai.com
-      user-agent:
-      - OpenAI/Python 1.61.0
-      x-stainless-arch:
-      - arm64
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - MacOS
-      x-stainless-package-version:
-      - 1.61.0
-      x-stainless-raw-response:
-      - 'true'
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.12.8
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    content: "{\n \"id\": \"chatcmpl-B2P4zoJZuES7Aom8ugEq1modz5Vsl\",\n \"object\":
-      \"chat.completion\",\n \"created\": 1739912761,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
-      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
-      \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
-      \ \"id\": \"call_F6fJxISpMKUBIGV6dd2vjRNG\",\n \"type\":
-      \"function\",\n \"function\": {\n \"name\": \"failing_tool\",\n
-      \ \"arguments\": \"{\\\"param\\\":\\\"test\\\"}\"\n }\n
-      \ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
-      null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n
-      \ \"prompt_tokens\": 51,\n \"completion_tokens\": 15,\n \"total_tokens\":
-      66,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
-      0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
-      0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\":
-      0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":
-      \"fp_00428b782a\"\n}\n"
-    headers:
-      CF-RAY:
-      - 9140fa827f38eb1e-SJC
-      Connection:
-      - keep-alive
-      Content-Encoding:
-      - gzip
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 18 Feb 2025 21:06:02 GMT
-      Server:
-      - cloudflare
-      Set-Cookie:
-      - __cf_bm=xbuu3IQpCMh.43ZrqL1TRMECOc6QldgHV0hzOX1GrWI-1739912762-1.0.1.1-t7iyq5xMioPrwfeaHLvPT9rwRPp7Q9A9uIm69icH9dPxRD4xMA3cWqb1aXj1_e2IyAEQQWFe1UWjlmJ22aHh3Q;
-        path=/; expires=Tue, 18-Feb-25 21:36:02 GMT; domain=.api.openai.com; HttpOnly;
-        Secure; SameSite=None
-      - _cfuvid=x9l.Rhja8_wXDN.j8qcEU1PvvEqAwZp4Fd3s_aj4qwM-1739912762161-0.0.1.1-604800000;
-        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - nosniff
-      access-control-expose-headers:
-      - X-Request-ID
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - crewai-iuxna1
-      openai-processing-ms:
-      - '861'
-      openai-version:
-      - '2020-10-01'
-      strict-transport-security:
-      - max-age=31536000; includeSubDomains; preload
-      x-ratelimit-limit-requests:
-      - '30000'
-      x-ratelimit-limit-tokens:
-      - '150000000'
-      x-ratelimit-remaining-requests:
-      - '29999'
-      x-ratelimit-remaining-tokens:
-      - '149999978'
-      x-ratelimit-reset-requests:
-      - 2ms
-      x-ratelimit-reset-tokens:
-      - 0s
-      x-request-id:
-      - req_8666ec3aa6677cb346ba00993556051d
-    http_version: HTTP/1.1
-    status_code: 200
-version: 1
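The file removed above is a VCR cassette: each entry under `interactions` pairs a recorded HTTP request with its recorded response, so a test marked with `@pytest.mark.vcr` replays the OpenAI call offline instead of hitting api.openai.com. As a minimal sketch of how such cassettes are typically wired up with the pytest-recording plugin (the `vcr_config` fixture name is that plugin's convention; the `record_mode` value is an assumption, not taken from this repo):

import pytest

# Sketch: pytest-recording passes this dict to vcrpy for every test
# decorated with @pytest.mark.vcr in the module.
@pytest.fixture(scope="module")
def vcr_config():
    return {
        # Strip the API key before the request is written to the cassette,
        # matching the filter_headers=["authorization"] marker used in the
        # test below.
        "filter_headers": ["authorization"],
        # Assumed mode: record on the first run, replay from disk afterwards.
        "record_mode": "once",
    }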
@@ -395,51 +395,3 @@ def test_deepseek_r1_with_open_router():
     result = llm.call("What is the capital of France?")
     assert isinstance(result, str)
     assert "Paris" in result
-
-
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_tool_execution_error_event():
-    llm = LLM(model="gpt-4o-mini")
-
-    def failing_tool(param: str) -> str:
-        """This tool always fails."""
-        raise Exception("Tool execution failed!")
-
-    tool_schema = {
-        "type": "function",
-        "function": {
-            "name": "failing_tool",
-            "description": "This tool always fails.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "param": {"type": "string", "description": "A test parameter"}
-                },
-                "required": ["param"],
-            },
-        },
-    }
-
-    received_events = []
-
-    @crewai_event_bus.on(ToolExecutionErrorEvent)
-    def event_handler(source, event):
-        received_events.append(event)
-
-    available_functions = {"failing_tool": failing_tool}
-
-    messages = [{"role": "user", "content": "Use the failing tool"}]
-
-    llm.call(
-        messages,
-        tools=[tool_schema],
-        available_functions=available_functions,
-    )
-
-    assert len(received_events) == 1
-    event = received_events[0]
-    assert isinstance(event, ToolExecutionErrorEvent)
-    assert event.tool_name == "failing_tool"
-    assert event.tool_args == {"param": "test"}
-    assert event.tool_class == failing_tool
-    assert "Tool execution failed!" in event.error
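The deleted test above leans on a publish/subscribe event bus: `crewai_event_bus.on(ToolExecutionErrorEvent)` registers a handler that fires whenever a tool raises during execution. A minimal sketch of that on/emit pattern, as a hypothetical stand-in rather than crewAI's actual bus implementation:

from collections import defaultdict


class MiniEventBus:
    """Hypothetical bus illustrating the on/emit contract the test uses."""

    def __init__(self):
        self._handlers = defaultdict(list)

    def on(self, event_type):
        # Used as a decorator: @bus.on(SomeEvent) registers the handler.
        def decorator(handler):
            self._handlers[event_type].append(handler)
            return handler
        return decorator

    def emit(self, source, event):
        # Deliver the event to every handler registered for its exact type.
        for handler in self._handlers[type(event)]:
            handler(source, event)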
@@ -29,7 +29,7 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
     """Fixture to create a Mem0Storage instance with mocked dependencies"""
 
     # Patch the Memory class to return our mock
-    with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory):
+    with patch("mem0.memory.main.Memory.from_config", return_value=mock_mem0_memory) as mock_from_config:
         config = {
             "vector_store": {
                 "provider": "mock_vector_store",
@@ -66,13 +66,15 @@ def mem0_storage_with_mocked_config(mock_mem0_memory):
        )
 
        mem0_storage = Mem0Storage(type="short_term", crew=crew)
-        return mem0_storage
+        return mem0_storage, mock_from_config, config
 
 
 def test_mem0_storage_initialization(mem0_storage_with_mocked_config, mock_mem0_memory):
     """Test that Mem0Storage initializes correctly with the mocked config"""
-    assert mem0_storage_with_mocked_config.memory_type == "short_term"
-    assert mem0_storage_with_mocked_config.memory is mock_mem0_memory
+    mem0_storage, mock_from_config, config = mem0_storage_with_mocked_config
+    assert mem0_storage.memory_type == "short_term"
+    assert mem0_storage.memory is mock_mem0_memory
+    mock_from_config.assert_called_once_with(config)
 
 
 @pytest.fixture
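The fixture change above follows a common pattern: keep the mock produced by the `patch(...)` context manager and hand it to the test alongside the object under test, so the test can assert exactly how the mocked dependency was called. A self-contained sketch of the pattern with hypothetical names (`make_client`, `Service`), not crewAI code:

from unittest.mock import patch

import pytest


def make_client(config):
    """Hypothetical factory standing in for Memory.from_config."""
    raise RuntimeError("would reach a real backend")


class Service:
    """Hypothetical object under test; its constructor calls the factory."""

    def __init__(self, config):
        self.client = make_client(config)


@pytest.fixture
def service_with_mock():
    # Patch the factory in this module's namespace and expose the mock.
    with patch(f"{__name__}.make_client", return_value="client") as mock_make:
        config = {"provider": "mock"}
        yield Service(config), mock_make, config


def test_factory_called_with_config(service_with_mock):
    service, mock_make, config = service_with_mock
    assert service.client == "client"
    mock_make.assert_called_once_with(config)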
@@ -1,4 +1,8 @@
-from typing import Callable
+import asyncio
+import inspect
+import unittest
+from typing import Any, Callable, Dict, List
+from unittest.mock import patch
 
 from crewai.tools import BaseTool, tool
 
@@ -100,3 +104,91 @@ def test_default_cache_function_is_true():
     my_tool = MyCustomTool()
     # Assert all the right attributes were defined
     assert my_tool.cache_function()
+
+
+def test_result_as_answer_in_tool_decorator():
+    @tool("Tool with result as answer", result_as_answer=True)
+    def my_tool_with_result_as_answer(question: str) -> str:
+        """This tool will return its result as the final answer."""
+        return question
+
+    assert my_tool_with_result_as_answer.result_as_answer is True
+
+    converted_tool = my_tool_with_result_as_answer.to_structured_tool()
+    assert converted_tool.result_as_answer is True
+
+    @tool("Tool with default result_as_answer")
+    def my_tool_with_default(question: str) -> str:
+        """This tool uses the default result_as_answer value."""
+        return question
+
+    assert my_tool_with_default.result_as_answer is False
+
+    converted_tool = my_tool_with_default.to_structured_tool()
+    assert converted_tool.result_as_answer is False
+
+
+class SyncTool(BaseTool):
+    """Test implementation with a synchronous _run method"""
+    name: str = "sync_tool"
+    description: str = "A synchronous tool for testing"
+
+    def _run(self, input_text: str) -> str:
+        """Process input text synchronously."""
+        return f"Processed {input_text} synchronously"
+
+
+class AsyncTool(BaseTool):
+    """Test implementation with an asynchronous _run method"""
+    name: str = "async_tool"
+    description: str = "An asynchronous tool for testing"
+
+    async def _run(self, input_text: str) -> str:
+        """Process input text asynchronously."""
+        await asyncio.sleep(0.1)  # Simulate async operation
+        return f"Processed {input_text} asynchronously"
+
+
+def test_sync_run_returns_direct_result():
+    """Test that _run in a synchronous tool returns a direct result, not a coroutine."""
+    tool = SyncTool()
+    result = tool._run(input_text="hello")
+
+    assert not asyncio.iscoroutine(result)
+    assert result == "Processed hello synchronously"
+
+    run_result = tool.run(input_text="hello")
+    assert run_result == "Processed hello synchronously"
+
+
+def test_async_run_returns_coroutine():
+    """Test that _run in an asynchronous tool returns a coroutine object."""
+    tool = AsyncTool()
+    result = tool._run(input_text="hello")
+
+    assert asyncio.iscoroutine(result)
+    result.close()  # Clean up the coroutine
+
+
+def test_run_calls_asyncio_run_for_async_tools():
+    """Test that asyncio.run is called when using async tools."""
+    async_tool = AsyncTool()
+
+    with patch('asyncio.run') as mock_run:
+        mock_run.return_value = "Processed test asynchronously"
+        async_result = async_tool.run(input_text="test")
+
+        mock_run.assert_called_once()
+        assert async_result == "Processed test asynchronously"
+
+
+def test_run_does_not_call_asyncio_run_for_sync_tools():
+    """Test that asyncio.run is NOT called when using sync tools."""
+    sync_tool = SyncTool()
+
+    with patch('asyncio.run') as mock_run:
+        sync_result = sync_tool.run(input_text="test")
+
+        mock_run.assert_not_called()
+        assert sync_result == "Processed test synchronously"
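Taken together, these tests pin down a dispatch rule for `BaseTool.run`: a synchronous `_run` is called directly and its value returned, while an asynchronous `_run` returns a coroutine that must be driven to completion with `asyncio.run`. A minimal sketch of that rule, assuming nothing about crewAI's internals beyond what the assertions require:

import asyncio
import inspect


class MiniTool:
    """Hypothetical stand-in for BaseTool showing the tested dispatch."""

    def _run(self, **kwargs):
        raise NotImplementedError

    def run(self, **kwargs):
        result = self._run(**kwargs)
        if inspect.iscoroutine(result):
            # Async _run: the call produced a coroutine object, so execute
            # it here; this is the asyncio.run the tests patch and count.
            return asyncio.run(result)
        # Sync _run: the value is already final and asyncio.run is never
        # touched, matching mock_run.assert_not_called().
        return result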