feat: remove unused code and change ToolUsageStarted event place (#2581)

* feat: remove unused code and change ToolUsageStarted event place

* feat: run lint

* feat: add agent reference inside liteagent

* feat: remove unused logic

* feat: remove unneeded event

* feat: remove test from tool execution error

* feat: remove cassette
Eduardo Chiarotti, 2025-04-11 14:26:59 -04:00, committed by GitHub
parent ea5ae9086a
commit 40a441f30e
8 changed files with 23 additions and 251 deletions
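The substantive changes are relocating where the ToolUsageStarted event is emitted and giving LiteAgent a reference to its agent; the hunks shown below remove a tool-execution-error test and its recorded cassette. As a rough sketch only, the relocated event can be observed with the same crewai_event_bus.on decorator used by the removed test further down; the ToolUsageStartedEvent class name, its import path, and its tool_name/tool_args fields are assumptions, not shown in this diff.

# Sketch only: the class name, import paths, and event fields below are assumed,
# not confirmed by this diff.
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent


@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event):
    # `source` is the emitting object, i.e. the place this commit moves the
    # emission to; logging it shows where the event now fires.
    print(f"{type(source).__name__} started tool {event.tool_name} with {event.tool_args}")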

@@ -1,112 +0,0 @@
interactions:
- request:
    body: '{"messages": [{"role": "user", "content": "Use the failing tool"}], "model":
      "gpt-4o-mini", "stop": [], "tools": [{"type": "function", "function": {"name":
      "failing_tool", "description": "This tool always fails.", "parameters": {"type":
      "object", "properties": {"param": {"type": "string", "description": "A test
      parameter"}}, "required": ["param"]}}}]}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate
      connection:
      - keep-alive
      content-length:
      - '353'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.61.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.61.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.8
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    content: "{\n \"id\": \"chatcmpl-B2P4zoJZuES7Aom8ugEq1modz5Vsl\",\n \"object\":
      \"chat.completion\",\n \"created\": 1739912761,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
      \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
      \ \"id\": \"call_F6fJxISpMKUBIGV6dd2vjRNG\",\n \"type\":
      \"function\",\n \"function\": {\n \"name\": \"failing_tool\",\n
      \ \"arguments\": \"{\\\"param\\\":\\\"test\\\"}\"\n }\n
      \ }\n ],\n \"refusal\": null\n },\n \"logprobs\":
      null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\": {\n
      \ \"prompt_tokens\": 51,\n \"completion_tokens\": 15,\n \"total_tokens\":
      66,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
      0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
      0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\":
      0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\":
      \"fp_00428b782a\"\n}\n"
    headers:
      CF-RAY:
      - 9140fa827f38eb1e-SJC
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Tue, 18 Feb 2025 21:06:02 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=xbuu3IQpCMh.43ZrqL1TRMECOc6QldgHV0hzOX1GrWI-1739912762-1.0.1.1-t7iyq5xMioPrwfeaHLvPT9rwRPp7Q9A9uIm69icH9dPxRD4xMA3cWqb1aXj1_e2IyAEQQWFe1UWjlmJ22aHh3Q;
        path=/; expires=Tue, 18-Feb-25 21:36:02 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=x9l.Rhja8_wXDN.j8qcEU1PvvEqAwZp4Fd3s_aj4qwM-1739912762161-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '861'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999978'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_8666ec3aa6677cb346ba00993556051d
    http_version: HTTP/1.1
    status_code: 200
version: 1

@@ -395,51 +395,3 @@ def test_deepseek_r1_with_open_router():
    result = llm.call("What is the capital of France?")
    assert isinstance(result, str)
    assert "Paris" in result


@pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_execution_error_event():
    llm = LLM(model="gpt-4o-mini")

    def failing_tool(param: str) -> str:
        """This tool always fails."""
        raise Exception("Tool execution failed!")

    tool_schema = {
        "type": "function",
        "function": {
            "name": "failing_tool",
            "description": "This tool always fails.",
            "parameters": {
                "type": "object",
                "properties": {
                    "param": {"type": "string", "description": "A test parameter"}
                },
                "required": ["param"],
            },
        },
    }

    received_events = []

    @crewai_event_bus.on(ToolExecutionErrorEvent)
    def event_handler(source, event):
        received_events.append(event)

    available_functions = {"failing_tool": failing_tool}
    messages = [{"role": "user", "content": "Use the failing tool"}]

    llm.call(
        messages,
        tools=[tool_schema],
        available_functions=available_functions,
    )

    assert len(received_events) == 1
    event = received_events[0]
    assert isinstance(event, ToolExecutionErrorEvent)
    assert event.tool_name == "failing_tool"
    assert event.tool_args == {"param": "test"}
    assert event.tool_class == failing_tool
    assert "Tool execution failed!" in event.error