Suppressed UserWarnings from litellm pydantic issues (#1833)

* Suppressed UserWarnings from litellm pydantic issues

* Change litellm version

* Fix failing Ollama tasks
Author: Brandon Hancock (bhancock_ai)
Date: 2024-12-31 16:40:51 -05:00 (committed by GitHub)
parent 4469461b38
commit ba89e43b62
12 changed files with 2238 additions and 543 deletions
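
The fix applies one pattern throughout: wrap the exact statements that trigger the warning in warnings.catch_warnings() with a scoped simplefilter, so the filter is undone when the block exits and UserWarnings from the rest of the process stay visible. A minimal, self-contained sketch of that pattern (noisy_import is a stand-in for litellm's import-time pydantic warning, not crewai code):

import warnings

def noisy_import():
    # Stand-in for the pydantic UserWarning that litellm emits at import time.
    warnings.warn("field shadows a parent attribute", UserWarning)

with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    noisy_import()  # silenced inside the block

noisy_import()  # warns again: the previous filter state is restored on exit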


@@ -726,11 +726,7 @@ class Crew(BaseModel):
             # Determine which tools to use - task tools take precedence over agent tools
             tools_for_task = task.tools or agent_to_use.tools or []
-            tools_for_task = self._prepare_tools(
-                agent_to_use,
-                task,
-                tools_for_task
-            )
+            tools_for_task = self._prepare_tools(agent_to_use, task, tools_for_task)
 
             self._log_task_start(task, agent_to_use.role)
@@ -797,14 +793,18 @@ class Crew(BaseModel):
return skipped_task_output
return None
def _prepare_tools(self, agent: BaseAgent, task: Task, tools: List[Tool]) -> List[Tool]:
def _prepare_tools(
self, agent: BaseAgent, task: Task, tools: List[Tool]
) -> List[Tool]:
# Add delegation tools if agent allows delegation
if agent.allow_delegation:
if self.process == Process.hierarchical:
if self.manager_agent:
tools = self._update_manager_tools(task, tools)
else:
raise ValueError("Manager agent is required for hierarchical process.")
raise ValueError(
"Manager agent is required for hierarchical process."
)
elif agent and agent.allow_delegation:
tools = self._add_delegation_tools(task, tools)
@@ -823,7 +823,9 @@ class Crew(BaseModel):
             return self.manager_agent
         return task.agent
 
-    def _merge_tools(self, existing_tools: List[Tool], new_tools: List[Tool]) -> List[Tool]:
+    def _merge_tools(
+        self, existing_tools: List[Tool], new_tools: List[Tool]
+    ) -> List[Tool]:
         """Merge new tools into existing tools list, avoiding duplicates by tool name."""
         if not new_tools:
             return existing_tools
@@ -839,7 +841,9 @@ class Crew(BaseModel):
         return tools
 
-    def _inject_delegation_tools(self, tools: List[Tool], task_agent: BaseAgent, agents: List[BaseAgent]):
+    def _inject_delegation_tools(
+        self, tools: List[Tool], task_agent: BaseAgent, agents: List[BaseAgent]
+    ):
         delegation_tools = task_agent.get_delegation_tools(agents)
         return self._merge_tools(tools, delegation_tools)
@@ -856,7 +860,9 @@ class Crew(BaseModel):
         if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent:
             if not tools:
                 tools = []
-            tools = self._inject_delegation_tools(tools, task.agent, agents_for_delegation)
+            tools = self._inject_delegation_tools(
+                tools, task.agent, agents_for_delegation
+            )
         return tools
 
     def _log_task_start(self, task: Task, role: str = "None"):
@@ -870,7 +876,9 @@ class Crew(BaseModel):
         if task.agent:
             tools = self._inject_delegation_tools(tools, task.agent, [task.agent])
         else:
-            tools = self._inject_delegation_tools(tools, self.manager_agent, self.agents)
+            tools = self._inject_delegation_tools(
+                tools, self.manager_agent, self.agents
+            )
         return tools
 
     def _get_context(self, task: Task, task_outputs: List[TaskOutput]):
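
The crew.py hunks above are pure reflow (black-style line wrapping); only _merge_tools hints at behavior, and its body is not shown in this diff. A hypothetical sketch of the name-based deduplication its docstring describes (the Tool stub and the merge logic here are assumptions, not the actual crewai implementation):

from typing import List

class Tool:
    # Minimal stub; crewai's Tool carries far more than a name.
    def __init__(self, name: str):
        self.name = name

def merge_tools(existing_tools: List[Tool], new_tools: List[Tool]) -> List[Tool]:
    if not new_tools:
        return existing_tools
    # Drop existing tools whose names collide with incoming ones, then append
    # the new tools, so the incoming definitions win.
    new_names = {tool.name for tool in new_tools}
    merged = [tool for tool in existing_tools if tool.name not in new_names]
    merged.extend(new_tools)
    return merged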


@@ -6,8 +6,10 @@ import warnings
 from contextlib import contextmanager
 from typing import Any, Dict, List, Optional, Union
 
-import litellm
-from litellm import get_supported_openai_params
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore", UserWarning)
+    import litellm
+    from litellm import get_supported_openai_params
 
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
@@ -138,7 +140,7 @@ class LLM:
         self.kwargs = kwargs
 
         litellm.drop_params = True
-        litellm.set_verbose = False
         self.set_callbacks(callbacks)
+        self.set_env_callbacks()
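
set_env_callbacks() is called here but not defined in this hunk. A hedged sketch of what an environment-driven callback setup could look like: litellm.success_callback and litellm.failure_callback are real module-level lists, while the environment variable names below are assumptions for illustration only.

import os

import litellm  # assumes litellm is installed

def set_env_callbacks() -> None:
    # Hypothetical: read comma-separated callback names from the environment
    # (variable names assumed) and hand them to litellm's callback lists.
    success = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
    failure = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
    litellm.success_callback = [n.strip() for n in success.split(",") if n.strip()]
    litellm.failure_callback = [n.strip() for n in failure.split(",") if n.strip()]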


@@ -1,3 +1,4 @@
+import warnings
 from typing import Any, Optional, Type
@@ -25,9 +26,10 @@ class InternalInstructor:
         if self.agent and not self.llm:
             self.llm = self.agent.function_calling_llm or self.agent.llm
 
         # Lazy import
-        import instructor
-        from litellm import completion
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", UserWarning)
+            import instructor
+            from litellm import completion
 
         self._client = instructor.from_litellm(
             completion,
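
The filter only covers the two lazy imports; once the with block exits, warning behavior is back to normal. A short usage sketch of the client this file constructs (instructor.from_litellm is the real instructor API; the response model is an assumed example, not part of the diff):

import warnings

from pydantic import BaseModel

with warnings.catch_warnings():
    warnings.simplefilter("ignore", UserWarning)
    import instructor  # assumes instructor is installed
    from litellm import completion

class Answer(BaseModel):
    # Assumed example response model.
    text: str

client = instructor.from_litellm(completion)
# client.chat.completions.create(model=..., response_model=Answer, messages=[...])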


@@ -1,3 +1,5 @@
+import warnings
+
 from litellm.integrations.custom_logger import CustomLogger
 from litellm.types.utils import Usage
@@ -12,11 +14,13 @@ class TokenCalcHandler(CustomLogger):
         if self.token_cost_process is None:
             return
 
-        usage: Usage = response_obj["usage"]
-        self.token_cost_process.sum_successful_requests(1)
-        self.token_cost_process.sum_prompt_tokens(usage.prompt_tokens)
-        self.token_cost_process.sum_completion_tokens(usage.completion_tokens)
-        if usage.prompt_tokens_details:
-            self.token_cost_process.sum_cached_prompt_tokens(
-                usage.prompt_tokens_details.cached_tokens
-            )
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", UserWarning)
+            usage: Usage = response_obj["usage"]
+            self.token_cost_process.sum_successful_requests(1)
+            self.token_cost_process.sum_prompt_tokens(usage.prompt_tokens)
+            self.token_cost_process.sum_completion_tokens(usage.completion_tokens)
+            if usage.prompt_tokens_details:
+                self.token_cost_process.sum_cached_prompt_tokens(
+                    usage.prompt_tokens_details.cached_tokens
+                )
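
The token_cost_process collaborator is not part of this diff; a minimal stand-in consistent with the four accumulator calls above (the class name and attribute names are assumptions, grounded only in the method names visible here):

class TokenProcess:
    # Assumed-minimal accumulator matching the calls in log_success_event.
    def __init__(self) -> None:
        self.successful_requests = 0
        self.prompt_tokens = 0
        self.completion_tokens = 0
        self.cached_prompt_tokens = 0

    def sum_successful_requests(self, requests: int) -> None:
        self.successful_requests += requests

    def sum_prompt_tokens(self, tokens: int) -> None:
        self.prompt_tokens += tokens

    def sum_completion_tokens(self, tokens: int) -> None:
        self.completion_tokens += tokens

    def sum_cached_prompt_tokens(self, tokens: int) -> None:
        self.cached_prompt_tokens += tokens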