Compare commits

...

2 Commits

Author: Devin AI
SHA1: 8cc63e7925
Message: Remove unused type ignore comment
Co-Authored-By: João <joao@crewai.com>
Date: 2025-11-16 10:42:14 +00:00

Author: Devin AI
SHA1: f9ae6c52de
Message: Fix A2A LLM parameter forwarding in InternalInstructor
- Forward LLM configuration parameters (api_key, api_base, temperature, etc.) to the instructor client for LiteLLM instances
- Only forward parameters for LiteLLM instances (is_litellm=True) to avoid breaking non-LiteLLM paths
- Filter out None values to prevent errors
- Prefer max_tokens over max_completion_tokens when both are present
- Fixes issue #3927, where A2A delegation lost the LLM configuration when checking whether remote agents are relevant

Co-Authored-By: João <joao@crewai.com>
Date: 2025-11-16 10:38:50 +00:00
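
A minimal sketch of the forwarding rules listed above (illustrative only, assuming nothing beyond what the diff below shows; the SimpleNamespace stands in for CrewAI's LiteLLM-backed LLM object and the concrete values are made up):

    # Illustrative sketch, not the actual CrewAI implementation.
    from types import SimpleNamespace
    from typing import Any

    llm = SimpleNamespace(
        is_litellm=True,
        api_key="sk-...",
        api_base=None,               # None values are filtered out
        temperature=0.2,
        max_tokens=512,
        max_completion_tokens=1024,  # ignored because max_tokens is also set
    )

    params: dict[str, Any] = {"model": "gpt-4o", "response_model": dict, "messages": []}
    for name in ("api_key", "api_base", "temperature", "max_tokens", "max_completion_tokens"):
        value = getattr(llm, name, None)
        if value is None:
            continue  # drop unset parameters
        if name == "max_completion_tokens" and "max_tokens" in params:
            continue  # prefer max_tokens when both are present
        params[name] = value

    # params now also carries api_key, temperature, and max_tokens;
    # api_base and max_completion_tokens were skipped.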


@@ -60,7 +60,7 @@ class InternalInstructor(Generic[T]):
         self.llm = llm or (agent.function_calling_llm or agent.llm if agent else None)
         with suppress_warnings():
-            import instructor  # type: ignore[import-untyped]
+            import instructor
         if (
             self.llm is not None
@@ -145,6 +145,39 @@ class InternalInstructor(Generic[T]):
         else:
             model_name = self.llm.model
-        return self._client.chat.completions.create(  # type: ignore[no-any-return]
-            model=model_name, response_model=self.model, messages=messages
-        )
+        params: dict[str, Any] = {
+            "model": model_name,
+            "response_model": self.model,
+            "messages": messages,
+        }
+
+        if not isinstance(self.llm, str) and hasattr(self.llm, "is_litellm") and self.llm.is_litellm:
+            param_names = [
+                "api_key",
+                "api_base",
+                "base_url",
+                "api_version",
+                "temperature",
+                "top_p",
+                "n",
+                "stop",
+                "max_tokens",
+                "max_completion_tokens",
+                "timeout",
+                "presence_penalty",
+                "frequency_penalty",
+                "logit_bias",
+                "seed",
+                "logprobs",
+                "top_logprobs",
+                "reasoning_effort",
+            ]
+            for param_name in param_names:
+                value = getattr(self.llm, param_name, None)
+                if value is not None:
+                    if param_name == "max_completion_tokens" and "max_tokens" in params:
+                        continue
+                    params[param_name] = value
+
+        return self._client.chat.completions.create(**params)  # type: ignore[no-any-return]
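
For the non-LiteLLM paths called out in the commit message, the guard keeps the previous behavior. A minimal sketch, assuming the llm is a bare model-name string (any object without is_litellm=True takes the same path):

    # Illustrative: with a plain string model name the guard is False, so only
    # model/response_model/messages are sent, matching the pre-fix behavior.
    llm = "gpt-4o-mini"
    forward_extra_params = not isinstance(llm, str) and getattr(llm, "is_litellm", False)
    assert forward_extra_params is False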