Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-28 18:28:30 +00:00
Compare commits: luzk-eng-4...bugfix-pyt (9 commits)
| SHA1 |
|---|
| a2224bbe18 |
| 37979a0ca1 |
| d96543d314 |
| 52e10d6c84 |
| f18a112cd7 |
| 40dcdb43d6 |
| 1167fbdd8c |
| d200d00bb5 |
| bf55dde358 |
.github/workflows/tests.yml (vendored, 8 changed lines)

```diff
@@ -12,6 +12,9 @@ jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
+    strategy:
+      matrix:
+        python-version: ['3.10', '3.11', '3.12']
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -21,9 +24,8 @@ jobs:
         with:
           enable-cache: true

-      - name: Set up Python
-        run: uv python install 3.12.8
+      - name: Set up Python ${{ matrix.python-version }}
+        run: uv python install ${{ matrix.python-version }}

       - name: Install the project
         run: uv sync --dev --all-extras
```
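The matrix is the substantive change here: tests previously ran only on a pinned 3.12.8 and now run on 3.10, 3.11, and 3.12. That plausibly connects to the hunks further down, which drop `typing.Self` — a symbol that only exists on Python 3.11+, so a 3.10 matrix job would flag it immediately. A minimal sketch of the incompatibility (the version guard is illustrative, not part of this diff):

```python
# Sketch: why a Python 3.10 job is useful for this change.
# typing.Self was added in Python 3.11, so importing it fails on 3.10.
import sys

if sys.version_info >= (3, 11):
    from typing import Self  # available on 3.11+
else:
    # On 3.10 the import above raises ImportError; a string forward
    # reference (as this compare adopts) avoids the import entirely.
    Self = None  # placeholder for illustration only

print(f"Python {sys.version_info.major}.{sys.version_info.minor}: Self = {Self}")
```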
In the `Flow` class:

```diff
@@ -1043,6 +1043,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 import traceback

                 traceback.print_exc()
             raise

     def _log_flow_event(
         self, message: str, color: str = "yellow", level: str = "info"
```
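For context, the visible pattern in this hunk is print-then-reraise: the traceback is emitted for visibility while the exception still propagates to the caller. A standalone sketch (the `run_flow` name and the raised error are illustrative, not from this diff):

```python
import traceback

def run_flow():
    try:
        raise RuntimeError("something failed inside the flow")  # stand-in for flow logic
    except Exception:
        # Print the full traceback for debugging, then re-raise so the
        # caller still sees the original exception.
        traceback.print_exc()
        raise

# run_flow()  # would print the traceback and then propagate RuntimeError
```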
In the `LLM` module, the `SimpleNamespace` import goes away; it was used only by the in-stream tool-call block removed below:

```diff
@@ -5,7 +5,6 @@ import sys
 import threading
 import warnings
 from contextlib import contextmanager
-from types import SimpleNamespace
 from typing import (
     Any,
     Dict,
```
The per-chunk accumulation state is dropped from the streaming setup:

```diff
@@ -372,8 +371,6 @@ class LLM(BaseLLM):
         last_chunk = None
         chunk_count = 0
         usage_info = None
-        tool_calls = None
-        accumulated_tool_args = {}  # Track tool call arguments by index

         # --- 2) Make sure stream is set to True and include usage metrics
         params["stream"] = True
```
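Step 2 flips the request into streaming mode; the later hunk that pops `stream_options` suggests the params also carry a usage option, which in OpenAI-style chat APIs is requested as below. A hedged sketch, not the file's exact code (the model name and message are illustrative):

```python
# Sketch of OpenAI-style streaming params with usage reporting.
# "stream_options": {"include_usage": True} asks the API to append a final
# chunk carrying token usage; it is only valid together with stream=True,
# which is why the non-streaming fallback later has to pop it.
params = {
    "model": "gpt-4o-mini",  # illustrative model name
    "messages": [{"role": "user", "content": "hello"}],
}
params["stream"] = True
params["stream_options"] = {"include_usage": True}
```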
Chunk parsing gets defensive-attribute comments, and the entire in-stream tool-call accumulation block is removed (long wrapped lines rejoined here for readability):

```diff
@@ -395,6 +392,7 @@ class LLM(BaseLLM):
                     if isinstance(chunk, dict) and "choices" in chunk:
                         choices = chunk["choices"]
                     elif hasattr(chunk, "choices"):
+                        # Check if choices is not a type but an actual attribute with value
                         if not isinstance(getattr(chunk, "choices"), type):
                             choices = getattr(chunk, "choices")
@@ -402,97 +400,14 @@ class LLM(BaseLLM):
                     if isinstance(chunk, dict) and "usage" in chunk:
                         usage_info = chunk["usage"]
                     elif hasattr(chunk, "usage"):
+                        # Check if usage is not a type but an actual attribute with value
                         if not isinstance(getattr(chunk, "usage"), type):
                             usage_info = getattr(chunk, "usage")

                     if choices and len(choices) > 0:
                         choice = choices[0]

-                        # Check for tool calls in the chunk
-                        if "delta" in choice:
-                            delta = choice["delta"]
-                            if "tool_calls" in delta:
-                                tool_calls = delta["tool_calls"]
-                                # If we have tool calls and available functions, accumulate arguments
-                                if tool_calls and available_functions:
-                                    for tool_call in tool_calls:
-                                        if hasattr(tool_call, "index"):
-                                            index = tool_call.index
-                                            if index not in accumulated_tool_args:
-                                                accumulated_tool_args[index] = {
-                                                    "name": None,
-                                                    "arguments": "",
-                                                }
-
-                                            # Update tool call name if available
-                                            if hasattr(tool_call, "function") and hasattr(tool_call.function, "name"):
-                                                if tool_call.function.name:
-                                                    accumulated_tool_args[index]["name"] = tool_call.function.name
-
-                                            # Accumulate arguments
-                                            if hasattr(tool_call, "function") and hasattr(tool_call.function, "arguments"):
-                                                if tool_call.function.arguments:
-                                                    accumulated_tool_args[index]["arguments"] += tool_call.function.arguments
-
-                                            # Check if we have a complete tool call
-                                            if accumulated_tool_args[index]["name"] and accumulated_tool_args[index]["arguments"]:
-                                                try:
-                                                    # Try to parse the accumulated arguments
-                                                    json.loads(accumulated_tool_args[index]["arguments"])
-                                                    # Execute the tool call
-                                                    tool_result = self._handle_tool_call(
-                                                        [
-                                                            SimpleNamespace(
-                                                                function=SimpleNamespace(
-                                                                    name=accumulated_tool_args[index]["name"],
-                                                                    arguments=accumulated_tool_args[index]["arguments"],
-                                                                ),
-                                                            )
-                                                        ],
-                                                        available_functions,
-                                                    )
-
-                                                    if tool_result is not None:
-                                                        # Stream the tool result
-                                                        crewai_event_bus.emit(
-                                                            self,
-                                                            event=LLMStreamChunkEvent(chunk=tool_result),
-                                                        )
-                                                        return tool_result
-                                                except json.JSONDecodeError:
-                                                    # If JSON is incomplete, continue accumulating
-                                                    continue
-
-                        # Handle different delta formats for content
+                        # Handle different delta formats
                         delta = None
                         if isinstance(choice, dict) and "delta" in choice:
                             delta = choice["delta"]
```
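The block removed above implemented incremental tool-call assembly: argument JSON arrives split across chunks, keyed by tool-call index, and is only executable once the accumulated string parses. A standalone sketch of that pattern, with hand-rolled fragments standing in for real streaming chunks (all names illustrative):

```python
import json

# Fragments as a streaming API might deliver them: one tool call (index 0)
# whose JSON arguments are split across several chunks.
chunks = [
    {"index": 0, "name": "get_weather", "arguments": '{"city": '},
    {"index": 0, "name": None, "arguments": '"Paris"'},
    {"index": 0, "name": None, "arguments": "}"},
]

accumulated = {}  # tool-call index -> {"name", "arguments"}
for chunk in chunks:
    entry = accumulated.setdefault(chunk["index"], {"name": None, "arguments": ""})
    if chunk["name"]:
        entry["name"] = chunk["name"]
    entry["arguments"] += chunk["arguments"]

    if entry["name"] and entry["arguments"]:
        try:
            args = json.loads(entry["arguments"])  # succeeds only once complete
            print(f"ready to call {entry['name']}({args})")
        except json.JSONDecodeError:
            continue  # JSON still incomplete; keep accumulating
```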
Content extraction gains a guard for models that emit `content: None` chunks:

```diff
@@ -501,13 +416,17 @@ class LLM(BaseLLM):

                     # Extract content from delta
                     if delta:
                         # Handle dict format
                         if isinstance(delta, dict):
                             if "content" in delta and delta["content"] is not None:
                                 chunk_content = delta["content"]
                         # Handle object format
                         elif hasattr(delta, "content"):
                             chunk_content = getattr(delta, "content")

+                        # Handle case where content might be None or empty
+                        if chunk_content is None and isinstance(delta, dict):
+                            # Some models might send empty content chunks
+                            chunk_content = ""
                 except Exception as e:
                     logging.debug(f"Error extracting content from chunk: {e}")
```
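The extraction logic retained here has to tolerate both plain-dict chunks and attribute-style chunk objects. A compact sketch of that dual-format handling (using `SimpleNamespace` only to simulate an object-style delta; the function name is illustrative):

```python
from types import SimpleNamespace

def extract_content(delta):
    # Dict-style delta: {"content": "..."}.
    if isinstance(delta, dict):
        content = delta.get("content")
        # Some models send chunks with content=None; treat them as empty.
        return "" if content is None else content
    # Object-style delta exposing a .content attribute.
    if hasattr(delta, "content"):
        return delta.content or ""
    return ""

print(extract_content({"content": "hel"}))             # dict format -> "hel"
print(extract_content(SimpleNamespace(content="lo")))  # object format -> "lo"
print(extract_content({"content": None}))              # None content -> ""
```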
Chunk aggregation is commented, and the non-streaming fallback gains an explanatory comment:

```diff
@@ -515,7 +434,10 @@ class LLM(BaseLLM):

                 # Only add non-None content to the response
                 if chunk_content is not None:
+                    # Add the chunk content to the full response
                     full_response += chunk_content
+
+                    # Emit the chunk event
                     crewai_event_bus.emit(
                         self,
                         event=LLMStreamChunkEvent(chunk=chunk_content),
@@ -528,7 +450,9 @@ class LLM(BaseLLM):
                     )
                     non_streaming_params = params.copy()
                     non_streaming_params["stream"] = False
-                    non_streaming_params.pop("stream_options", None)
+                    non_streaming_params.pop(
+                        "stream_options", None
+                    )  # Remove stream_options for non-streaming call
                     return self._handle_non_streaming_response(
                         non_streaming_params, callbacks, available_functions
                     )
```
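The fallback re-issues the request without streaming, and `stream_options` must be dropped too, since it is only valid alongside `stream=True`. A minimal sketch of that parameter rewrite (values illustrative):

```python
# Sketch: converting streaming params into a valid non-streaming request.
params = {"model": "gpt-4o-mini", "stream": True,
          "stream_options": {"include_usage": True}}  # illustrative values

non_streaming_params = params.copy()
non_streaming_params["stream"] = False
# stream_options is only meaningful with stream=True; OpenAI-style APIs
# reject it on non-streaming calls, so it is removed defensively.
non_streaming_params.pop("stream_options", None)

print(non_streaming_params)  # {'model': 'gpt-4o-mini', 'stream': False}
```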
The tail of the streaming handler is restructured: content recovery from the last chunk is commented, and a post-stream tool-call check replaces the removed in-stream execution (steps 7-11):

```diff
@@ -540,6 +464,7 @@ class LLM(BaseLLM):
                 )
             if last_chunk is not None:
                 try:
+                    # Try to extract content from the last chunk's message
                     choices = None
                     if isinstance(last_chunk, dict) and "choices" in last_chunk:
                         choices = last_chunk["choices"]
@@ -549,6 +474,8 @@ class LLM(BaseLLM):

                     if choices and len(choices) > 0:
                         choice = choices[0]
+
+                        # Try to get content from message
                         message = None
                         if isinstance(choice, dict) and "message" in choice:
                             message = choice["message"]
@@ -573,14 +500,57 @@ class LLM(BaseLLM):
                         f"Last chunk format: {type(last_chunk)}, content: {last_chunk}"
                     )

-            # --- 6) If still empty, raise an error
+            # --- 6) If still empty, raise an error instead of using a default response
             if not full_response.strip():
                 raise Exception(
                     "No content received from streaming response. Received empty chunks or failed to extract content."
                 )

-            # --- 7) Log token usage and emit completion event
+            # --- 7) Check for tool calls in the final response
+            tool_calls = None
+            try:
+                if last_chunk:
+                    choices = None
+                    if isinstance(last_chunk, dict) and "choices" in last_chunk:
+                        choices = last_chunk["choices"]
+                    elif hasattr(last_chunk, "choices"):
+                        if not isinstance(getattr(last_chunk, "choices"), type):
+                            choices = getattr(last_chunk, "choices")
+
+                    if choices and len(choices) > 0:
+                        choice = choices[0]
+
+                        message = None
+                        if isinstance(choice, dict) and "message" in choice:
+                            message = choice["message"]
+                        elif hasattr(choice, "message"):
+                            message = getattr(choice, "message")
+
+                        if message:
+                            if isinstance(message, dict) and "tool_calls" in message:
+                                tool_calls = message["tool_calls"]
+                            elif hasattr(message, "tool_calls"):
+                                tool_calls = getattr(message, "tool_calls")
+            except Exception as e:
+                logging.debug(f"Error checking for tool calls: {e}")
+
+            # --- 8) If no tool calls or no available functions, return the text response directly
+            if not tool_calls or not available_functions:
+                # Log token usage if available in streaming mode
+                self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
+                # Emit completion event and return response
+                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
+                return full_response
+
+            # --- 9) Handle tool calls if present
+            tool_result = self._handle_tool_call(tool_calls, available_functions)
+            if tool_result is not None:
+                return tool_result
+
+            # --- 10) Log token usage if available in streaming mode
             self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

+            # --- 11) Emit completion event and return response
             self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
             return full_response
```
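Taken together, steps 7-11 replace the removed in-stream execution with a single post-stream decision: inspect the final chunk for tool calls and dispatch only when both tool calls and callable functions exist. A hedged sketch of that control flow, with all names illustrative rather than the class's real helpers:

```python
def finish_stream(full_response, tool_calls, available_functions, handle_tool_call):
    # No tool calls requested, or nothing registered to handle them:
    # the accumulated text is the final answer.
    if not tool_calls or not available_functions:
        return full_response

    # Otherwise run the tool call; fall back to the text if it yields nothing.
    result = handle_tool_call(tool_calls, available_functions)
    return result if result is not None else full_response

# Example: one tool call against one registered function.
tools = [{"name": "add", "arguments": {"a": 2, "b": 3}}]
funcs = {"add": lambda a, b: a + b}
dispatch = lambda calls, fns: fns[calls[0]["name"]](**calls[0]["arguments"])

print(finish_stream("ignored", tools, funcs, dispatch))     # tool path -> 5
print(finish_stream("plain text", None, funcs, dispatch))   # text path -> "plain text"
```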
The failure path in the exception handler is commented:

```diff
@@ -591,6 +561,7 @@ class LLM(BaseLLM):
                 self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
                 return full_response

+            # Emit failed event and re-raise the exception
             crewai_event_bus.emit(
                 self,
                 event=LLMCallFailedEvent(error=str(e)),
```
In `ExternalMemory`, `typing.Self` is replaced with a string forward reference:

```diff
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional, Self
+from typing import TYPE_CHECKING, Any, Dict, Optional

 from crewai.memory.external.external_memory_item import ExternalMemoryItem
 from crewai.memory.memory import Memory
@@ -52,7 +52,7 @@ class ExternalMemory(Memory):
     def reset(self) -> None:
         self.storage.reset()

-    def set_crew(self, crew: Any) -> Self:
+    def set_crew(self, crew: Any) -> "ExternalMemory":
         super().set_crew(crew)

         if not self.storage:
```
The base `Memory` class gets the same treatment:

```diff
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Self
+from typing import Any, Dict, List, Optional

 from pydantic import BaseModel
@@ -38,6 +38,6 @@ class Memory(BaseModel):
             query=query, limit=limit, score_threshold=score_threshold
         )

-    def set_crew(self, crew: Any) -> Self:
+    def set_crew(self, crew: Any) -> "Memory":
         self.crew = crew
         return self
```
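Both memory files make the same substitution: `typing.Self` (Python 3.11+) becomes a string forward reference, which every interpreter in the new test matrix can evaluate. One trade-off worth noting: a literal class-name annotation is less precise than `Self` under inheritance, which is presumably why `ExternalMemory.set_crew` re-declares its own return type rather than inheriting `-> "Memory"`. A sketch of the pattern (simplified stand-ins, not the real crewAI classes):

```python
from typing import Any

class Memory:
    crew: Any = None

    # String forward reference instead of typing.Self: works on Python 3.10,
    # where "from typing import Self" would raise ImportError.
    def set_crew(self, crew: Any) -> "Memory":
        self.crew = crew
        return self

class ExternalMemory(Memory):
    # Re-declared so the annotation stays precise for the subclass;
    # with typing.Self this override of the annotation would be unnecessary.
    def set_crew(self, crew: Any) -> "ExternalMemory":
        super().set_crew(crew)
        return self

m = ExternalMemory().set_crew(crew="my-crew")
print(type(m).__name__)  # ExternalMemory
```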