diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index c1e52f2cd..69780af8c 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -231,7 +231,7 @@ class LLM:
         :return: Final text response from the LLM or the tool result
         """
         with suppress_warnings():
-            if callbacks:
+            if callbacks and len(callbacks) > 0:
                 self.set_callbacks(callbacks)

             try:
@@ -259,7 +259,6 @@ class LLM:
                     "tools": tools,  # pass the tool schema
                 }

-                # Remove None values
                 params = {k: v for k, v in params.items() if v is not None}

                 response = litellm.completion(**params)
@@ -290,8 +289,6 @@ class LLM:
                     # Call the actual tool function
                     result = fn(**function_args)
-                    print(f"Result from function '{function_name}': {result}")
-
-                    # Return the result directly
                     return result
@@ -368,20 +365,36 @@ class LLM:
     def set_env_callbacks(self):
         """
         Sets the success and failure callbacks for the LiteLLM library from environment variables.
+
+        This method reads the `LITELLM_SUCCESS_CALLBACKS` and `LITELLM_FAILURE_CALLBACKS`
+        environment variables, which should contain comma-separated lists of callback names.
+        It then assigns these lists to `litellm.success_callback` and `litellm.failure_callback`,
+        respectively.
+
+        If the environment variables are not set or are empty, the corresponding callback lists
+        will be set to empty lists.
+
+        Example:
+            LITELLM_SUCCESS_CALLBACKS="langfuse,langsmith"
+            LITELLM_FAILURE_CALLBACKS="langfuse"
+
+        This will set `litellm.success_callback` to ["langfuse", "langsmith"] and
+        `litellm.failure_callback` to ["langfuse"].
         """
-        success_callbacks_str = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
-        success_callbacks = []
-        if success_callbacks_str:
-            success_callbacks = [
-                cb.strip() for cb in success_callbacks_str.split(",") if cb.strip()
-            ]
+        with suppress_warnings():
+            success_callbacks_str = os.environ.get("LITELLM_SUCCESS_CALLBACKS", "")
+            success_callbacks = []
+            if success_callbacks_str:
+                success_callbacks = [
+                    cb.strip() for cb in success_callbacks_str.split(",") if cb.strip()
+                ]

-        failure_callbacks_str = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
-        failure_callbacks = []
-        if failure_callbacks_str:
-            failure_callbacks = [
-                cb.strip() for cb in failure_callbacks_str.split(",") if cb.strip()
-            ]
+            failure_callbacks_str = os.environ.get("LITELLM_FAILURE_CALLBACKS", "")
+            failure_callbacks = []
+            if failure_callbacks_str:
+                failure_callbacks = [
+                    cb.strip() for cb in failure_callbacks_str.split(",") if cb.strip()
+                ]

-        litellm.success_callback = success_callbacks
-        litellm.failure_callback = failure_callbacks
+            litellm.success_callback = success_callbacks
+            litellm.failure_callback = failure_callbacks
diff --git a/src/crewai/utilities/llm_utils.py b/src/crewai/utilities/llm_utils.py
index a15b1417f..ea1783fa9 100644
--- a/src/crewai/utilities/llm_utils.py
+++ b/src/crewai/utilities/llm_utils.py
@@ -171,21 +171,25 @@ def _llm_via_environment_or_fallback() -> Optional[LLM]:
     set_provider = model_name.split("/")[0] if "/" in model_name else "openai"

     if set_provider in ENV_VARS:
-        for env_var in ENV_VARS[set_provider]:
-            key_name = env_var.get("key_name")
-            if key_name and key_name not in UNACCEPTED_ATTRIBUTES:
-                env_value = os.environ.get(key_name)
-                if env_value:
-                    # Map environment variable names to recognized parameters
-                    param_key = _normalize_key_name(key_name.lower())
-                    llm_params[param_key] = env_value
-            elif isinstance(env_var, dict):
-                if env_var.get("default", False):
-                    for key, value in env_var.items():
-                        if key not in ["prompt", "key_name", "default"]:
-                            llm_params[key.lower()] = value
-            else:
-                print(f"Expected env_var to be a dictionary, but got {type(env_var)}")
+        env_vars_for_provider = ENV_VARS[set_provider]
+        if isinstance(env_vars_for_provider, (list, tuple)):
+            for env_var in env_vars_for_provider:
+                key_name = env_var.get("key_name")
+                if key_name and key_name not in UNACCEPTED_ATTRIBUTES:
+                    env_value = os.environ.get(key_name)
+                    if env_value:
+                        # Map environment variable names to recognized parameters
+                        param_key = _normalize_key_name(key_name.lower())
+                        llm_params[param_key] = env_value
+                elif isinstance(env_var, dict):
+                    if env_var.get("default", False):
+                        for key, value in env_var.items():
+                            if key not in ["prompt", "key_name", "default"]:
+                                llm_params[key.lower()] = value
+                else:
+                    print(
+                        f"Expected env_var to be a dictionary, but got {type(env_var)}"
+                    )

     # Remove None values
     llm_params = {k: v for k, v in llm_params.items() if v is not None}