mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-05-08 02:29:00 +00:00
Merge branch 'main' into main
This commit is contained in:
@@ -465,11 +465,22 @@ Learn how to get the most out of your LLM configuration:
|
||||
# https://cloud.google.com/vertex-ai/generative-ai/docs/overview
|
||||
```
|
||||
|
||||
## GET CREDENTIALS
|
||||
file_path = 'path/to/vertex_ai_service_account.json'
|
||||
|
||||
# Load the JSON file
|
||||
with open(file_path, 'r') as file:
|
||||
vertex_credentials = json.load(file)
|
||||
|
||||
# Convert to JSON string
|
||||
vertex_credentials_json = json.dumps(vertex_credentials)
|
||||
|
||||
Example usage:
|
||||
```python Code
|
||||
llm = LLM(
|
||||
model="gemini/gemini-1.5-pro-latest",
|
||||
temperature=0.7,
|
||||
vertex_credentials=vertex_credentials_json
|
||||
)
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
@@ -519,7 +519,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
color="yellow",
|
||||
)
|
||||
self._handle_crew_training_output(initial_answer, feedback)
|
||||
self.messages.append(self._format_msg(f"Feedback: {feedback}"))
|
||||
self.messages.append(
|
||||
self._format_msg(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
)
|
||||
improved_answer = self._invoke_loop()
|
||||
self._handle_crew_training_output(improved_answer)
|
||||
self.ask_for_human_input = False
|
||||
@@ -566,7 +570,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
|
||||
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
    """Record the user's feedback in the conversation and re-run the agent loop.

    Appends two messages to the history — the raw feedback and the i18n
    "feedback_instructions" prompt rendered with that feedback — then invokes
    the loop again to produce a revised answer.
    """
    instructions = self._i18n.slice("feedback_instructions").format(feedback=feedback)
    for text in (f"Feedback: {feedback}", instructions):
        self.messages.append(self._format_msg(text))
    return self._invoke_loop()
|
||||
|
||||
def _log_feedback_error(self, retry_count: int, error: Exception) -> None:
|
||||
|
||||
@@ -137,6 +137,7 @@ class LLM:
|
||||
api_version: Optional[str] = None,
|
||||
api_key: Optional[str] = None,
|
||||
callbacks: List[Any] = [],
|
||||
**kwargs,
|
||||
):
|
||||
self.model = model
|
||||
self.timeout = timeout
|
||||
@@ -158,6 +159,7 @@ class LLM:
|
||||
self.api_key = api_key
|
||||
self.callbacks = callbacks
|
||||
self.context_window_size = 0
|
||||
self.additional_params = kwargs
|
||||
|
||||
litellm.drop_params = True
|
||||
|
||||
@@ -240,6 +242,7 @@ class LLM:
|
||||
"api_key": self.api_key,
|
||||
"stream": False,
|
||||
"tools": tools,
|
||||
**self.additional_params,
|
||||
}
|
||||
|
||||
# Remove None values from params
|
||||
|
||||
@@ -24,7 +24,8 @@
|
||||
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
|
||||
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
|
||||
"human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\"",
|
||||
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
|
||||
"feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary."
|
||||
},
|
||||
"errors": {
|
||||
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
|
||||
|
||||
@@ -35,6 +35,4 @@ class CrewTrainingHandler(PickleHandler):
|
||||
def clear(self) -> None:
    """Clear the training data by resetting the stored contents.

    Persists an empty dict via ``save``, which opens and (re)creates the
    backing file itself — so the previous existence check and manual
    ``open(self.file_path, "wb")`` truncation were redundant, and the
    duplicated ``self.save({})`` call is collapsed into one.
    """
    self.save({})
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from time import sleep
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
@@ -154,3 +155,50 @@ def test_llm_call_with_tool_and_message_list():
|
||||
|
||||
assert isinstance(result, int)
|
||||
assert result == 25
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_passes_additional_params():
    """Extra kwargs given to LLM() must be forwarded to litellm.completion."""
    llm = LLM(
        model="gpt-4o-mini",
        vertex_credentials="test_credentials",
        vertex_project="test_project",
    )
    messages = [{"role": "user", "content": "Hello, world!"}]

    with patch("litellm.completion") as mocked_completion:
        # Minimal fake litellm response: choices[0].message.content + usage.
        fake_response = MagicMock()
        fake_response.choices = [MagicMock(message=MagicMock(content="Test response"))]
        fake_response.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }
        mocked_completion.return_value = fake_response

        result = llm.call(messages)

        # Completion must be invoked exactly once, carrying the extra params.
        mocked_completion.assert_called_once()
        _, call_kwargs = mocked_completion.call_args

        assert call_kwargs["vertex_credentials"] == "test_credentials"
        assert call_kwargs["vertex_project"] == "test_project"

        # Core parameters must also be present unchanged.
        assert call_kwargs["model"] == "gpt-4o-mini"
        assert call_kwargs["messages"] == messages

        # llm.call returns the mocked message content.
        assert result == "Test response"
|
||||
|
||||
Reference in New Issue
Block a user