Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-16 04:18:35 +00:00)
Fix A2A LLM parameter forwarding in InternalInstructor
- Forward LLM configuration parameters (api_key, api_base, temperature, etc.) to the instructor client for LiteLLM instances
- Only forward parameters for LiteLLM instances (is_litellm=True) to avoid breaking non-LiteLLM paths
- Filter out None values to prevent errors
- Prefer max_tokens over max_completion_tokens when both are present

Fixes issue #3927, where A2A delegation lost the LLM configuration when checking whether remote agents are relevant.

Co-Authored-By: João <joao@crewai.com>
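For context, a minimal sketch of the failure mode described above, assuming crewAI's public LLM class and its documented keyword arguments (the exact model string and values here are illustrative only):

    from crewai import LLM

    llm = LLM(
        model="azure/gpt-4o",
        api_key="sk-...",  # custom credentials
        api_base="https://example-endpoint.openai.azure.com",
        api_version="2024-06-01",
        temperature=0.2,
    )
    # Before this fix, InternalInstructor passed only `model` through to the
    # instructor client, so api_key/api_base/api_version/temperature were
    # dropped during the A2A "is this remote agent relevant?" check; with the
    # fix, they are forwarded whenever llm.is_litellm is True.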
@@ -145,6 +145,39 @@ class InternalInstructor(Generic[T]):
         else:
             model_name = self.llm.model
 
-        return self._client.chat.completions.create(  # type: ignore[no-any-return]
-            model=model_name, response_model=self.model, messages=messages
-        )
+        params: dict[str, Any] = {
+            "model": model_name,
+            "response_model": self.model,
+            "messages": messages,
+        }
+
+        if not isinstance(self.llm, str) and hasattr(self.llm, "is_litellm") and self.llm.is_litellm:
+            param_names = [
+                "api_key",
+                "api_base",
+                "base_url",
+                "api_version",
+                "temperature",
+                "top_p",
+                "n",
+                "stop",
+                "max_tokens",
+                "max_completion_tokens",
+                "timeout",
+                "presence_penalty",
+                "frequency_penalty",
+                "logit_bias",
+                "seed",
+                "logprobs",
+                "top_logprobs",
+                "reasoning_effort",
+            ]
+
+            for param_name in param_names:
+                value = getattr(self.llm, param_name, None)
+                if value is not None:
+                    if param_name == "max_completion_tokens" and "max_tokens" in params:
+                        continue
+                    params[param_name] = value
+
+        return self._client.chat.completions.create(**params)  # type: ignore[no-any-return]
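To exercise the forwarding rules in isolation, here is a self-contained sketch of the same logic. FakeLLM, PARAM_NAMES, and build_params are illustrative stand-ins for this writeup, not crewAI APIs:

    from typing import Any

    class FakeLLM:
        """Stand-in for an LLM config object; only for this sketch."""
        def __init__(self, **kwargs: Any) -> None:
            self.is_litellm = True
            self.model = kwargs.pop("model")
            for name, value in kwargs.items():
                setattr(self, name, value)

    # Subset of the parameter list from the diff above.
    PARAM_NAMES = ["api_key", "api_base", "temperature",
                   "max_tokens", "max_completion_tokens"]

    def build_params(llm: Any, messages: list[dict[str, str]]) -> dict[str, Any]:
        params: dict[str, Any] = {"model": llm.model, "messages": messages}
        if not isinstance(llm, str) and getattr(llm, "is_litellm", False):
            for name in PARAM_NAMES:
                value = getattr(llm, name, None)
                if value is None:
                    continue  # unset parameters are filtered out
                if name == "max_completion_tokens" and "max_tokens" in params:
                    continue  # max_tokens wins when both are present
                params[name] = value
        return params

    llm = FakeLLM(model="gpt-4o", api_base="https://proxy.example/v1",
                  temperature=0.2, max_tokens=256, max_completion_tokens=512)
    print(build_params(llm, [{"role": "user", "content": "hi"}]))
    # Contains api_base, temperature, and max_tokens; max_completion_tokens
    # is skipped in favor of max_tokens, and api_key was never set, so it is
    # filtered out as None.

Note one detail the sketch preserves from the diff: the preference rule works because "max_tokens" precedes "max_completion_tokens" in the parameter list, so max_tokens is already in params by the time max_completion_tokens is considered.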