Compare commits

..

7 Commits

Author SHA1 Message Date
Devin AI
06d922f452 Fix import sorting in test file using ruff
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:22:50 +00:00
Devin AI
3e8635f1e7 Fix import sorting in test file again
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:21:22 +00:00
Devin AI
e8a1169b85 Fix import sorting in test file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:19:58 +00:00
Devin AI
64c1c4efa1 Fix linting error in test file
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:19:02 +00:00
Devin AI
46b5fc6538 Fix issue #2237: Properly handle LLM output with both Action and Final Answer
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-26 09:17:22 +00:00
Fernando Galves
34d2993456 Update the constants.py file adding the list of foundation models available in Amazon Bedrock (#2170)
* Update constants.py

This PR updates the list of foundation models available in Amazon Bedrock to reflect the latest offerings.

* Update constants.py with inference profiles

Add the cross-region inference profiles to increase throughput and improve resiliency by routing your requests across multiple AWS Regions during peak utilization bursts.

* Update constants.py

Fix the model order

---------

Co-authored-by: Brandon Hancock (bhancock_ai) <109994880+bhancockio@users.noreply.github.com>
2025-02-25 15:39:23 -05:00
devin-ai-integration[bot]
e3c5c174ee feat: add context window size for o3-mini model (#2192)
* feat: add context window size for o3-mini model

Fixes #2191

Co-Authored-By: Joe Moura <joao@crewai.com>

* feat: add context window validation and tests

- Add validation for context window size bounds (1024-2097152)
- Add test for context window validation
- Fix test import error

Co-Authored-By: Joe Moura <joao@crewai.com>

* style: fix import sorting in llm_test.py

Co-Authored-By: Joe Moura <joao@crewai.com>

---------

Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Co-authored-by: Joe Moura <joao@crewai.com>
Co-authored-by: Brandon Hancock (bhancock_ai) <109994880+bhancockio@users.noreply.github.com>
2025-02-25 15:32:14 -05:00
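The context-window validation described in commit e3c5c174ee above does not appear in the three files shown in this compare, but the idea is plain bounds checking against the 1024-2097152 range quoted in the commit message. A minimal sketch of that kind of check follows; the constant and function names are hypothetical, not crewAI's actual API.

# Hypothetical sketch of the bounds check described in the commit message.
# These names are illustrative only and do not mirror crewAI's internals.
MIN_CONTEXT_WINDOW_SIZE = 1024
MAX_CONTEXT_WINDOW_SIZE = 2097152


def validate_context_window_size(size: int) -> int:
    """Reject sizes outside the supported range before they reach the LLM config."""
    if not MIN_CONTEXT_WINDOW_SIZE <= size <= MAX_CONTEXT_WINDOW_SIZE:
        raise ValueError(
            f"Context window size {size} must be between "
            f"{MIN_CONTEXT_WINDOW_SIZE} and {MAX_CONTEXT_WINDOW_SIZE}"
        )
    return size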
3 changed files with 126 additions and 4 deletions

View File

@@ -232,7 +232,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 self._format_answer(answer)
             except OutputParserException as e:
                 if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
-                    answer = answer.split("Observation:")[0].strip()
+                    # If both Action and Final Answer are present, prioritize the Action
+                    # by removing the Final Answer part
+                    if "Final Answer:" in answer:
+                        parts = answer.split("Final Answer:")
+                        answer = parts[0].strip()
+                    # If that doesn't work, try splitting at Observation
+                    elif "Observation:" in answer:
+                        answer = answer.split("Observation:")[0].strip()
         return self._format_answer(answer)
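The splitting order introduced above can be illustrated outside crewAI with plain string operations. The snippet below is a standalone sketch, not crewAI code: clean_llm_output is a hypothetical helper and the raw text is an invented example, but the truncation logic mirrors the branch added in this hunk.

# Standalone sketch of the truncation order above: drop everything from
# "Final Answer:" onward when it is present, otherwise fall back to "Observation:".
def clean_llm_output(answer: str) -> str:
    if "Final Answer:" in answer:
        return answer.split("Final Answer:")[0].strip()
    if "Observation:" in answer:
        return answer.split("Observation:")[0].strip()
    return answer.strip()


raw = (
    "Thought: I need to search for information.\n"
    "Action: search\n"
    "Action Input: what is the temperature in SF?\n"
    "Final Answer: The temperature is 100 degrees"
)
print(clean_llm_output(raw))
# Thought: I need to search for information.
# Action: search
# Action Input: what is the temperature in SF?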

View File

@@ -216,10 +216,43 @@ MODELS = {
        "watsonx/ibm/granite-3-8b-instruct",
    ],
    "bedrock": [
        "bedrock/us.amazon.nova-pro-v1:0",
        "bedrock/us.amazon.nova-micro-v1:0",
        "bedrock/us.amazon.nova-lite-v1:0",
        "bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
        "bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
        "bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
        "bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
        "bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
        "bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
        "bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
        "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
        "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
        "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
        "bedrock/us.meta.llama3-1-8b-instruct-v1:0",
        "bedrock/us.meta.llama3-1-70b-instruct-v1:0",
        "bedrock/us.meta.llama3-3-70b-instruct-v1:0",
        "bedrock/us.meta.llama3-1-405b-instruct-v1:0",
        "bedrock/eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
        "bedrock/eu.anthropic.claude-3-sonnet-20240229-v1:0",
        "bedrock/eu.anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/eu.meta.llama3-2-3b-instruct-v1:0",
        "bedrock/eu.meta.llama3-2-1b-instruct-v1:0",
        "bedrock/apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
        "bedrock/apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
        "bedrock/apac.anthropic.claude-3-sonnet-20240229-v1:0",
        "bedrock/apac.anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/amazon.nova-pro-v1:0",
        "bedrock/amazon.nova-micro-v1:0",
        "bedrock/amazon.nova-lite-v1:0",
        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
        "bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
        "bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/anthropic.claude-v2:1",
        "bedrock/anthropic.claude-v2",
        "bedrock/anthropic.claude-instant-v1",
@@ -234,8 +267,6 @@ MODELS = {
        "bedrock/ai21.j2-mid-v1",
        "bedrock/ai21.j2-ultra-v1",
        "bedrock/ai21.jamba-instruct-v1:0",
        "bedrock/meta.llama2-13b-chat-v1",
        "bedrock/meta.llama2-70b-chat-v1",
        "bedrock/mistral.mistral-7b-instruct-v0:2",
        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
    ],
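These identifiers are the strings users hand to crewAI when picking a model; the new "us.", "eu.", and "apac." entries are the cross-region inference profiles mentioned in the commit message. A minimal usage sketch follows, assuming AWS credentials are already configured in the environment; the agent fields are illustrative placeholders.

from crewai import LLM, Agent

# One of the new cross-region ("us." prefixed) inference profiles from the list above.
bedrock_llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0")

researcher = Agent(
    role="Researcher",               # illustrative placeholder
    goal="Summarize AWS news",       # illustrative placeholder
    backstory="An example agent",    # illustrative placeholder
    llm=bedrock_llm,
)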

View File

@@ -0,0 +1,84 @@
from unittest.mock import MagicMock

import pytest

from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import (
    FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
    AgentAction,
    AgentFinish,
    OutputParserException,
)


def test_process_llm_response_with_action_and_final_answer():
    """Test that _process_llm_response correctly handles outputs with both Action and Final Answer."""
    # Create a mock LLM
    mock_llm = MagicMock()
    mock_llm.supports_stop_words.return_value = False

    # Create a mock agent
    mock_agent = MagicMock()

    # Create a CrewAgentExecutor instance
    executor = CrewAgentExecutor(
        llm=mock_llm,
        task=MagicMock(),
        crew=MagicMock(),
        agent=mock_agent,
        prompt={},
        max_iter=5,
        tools=[],
        tools_names="",
        stop_words=[],
        tools_description="",
        tools_handler=MagicMock(),
    )

    # Test case 1: Output with both Action and Final Answer, with Final Answer after Action
    output_with_both = """
    Thought: I need to search for information and then provide an answer.
    Action: search
    Action Input: what is the temperature in SF?
    Final Answer: The temperature is 100 degrees
    """

    # Mock the _format_answer method to first raise an exception and then return a valid result
    format_answer_mock = MagicMock()
    format_answer_mock.side_effect = [
        OutputParserException(FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE),
        AgentAction(thought="", tool="search", tool_input="what is the temperature in SF?", text=""),
    ]
    executor._format_answer = format_answer_mock

    # Process the response
    result = executor._process_llm_response(output_with_both)

    # Verify that the result is an AgentAction
    assert isinstance(result, AgentAction)
    assert result.tool == "search"
    assert result.tool_input == "what is the temperature in SF?"

    # Test case 2: Output with both Action and Final Answer, with Observation in between
    output_with_observation = """
    Thought: I need to search for information.
    Action: search
    Action Input: what is the temperature in SF?
    Observation: The temperature in SF is 100 degrees.
    Final Answer: The temperature is 100 degrees
    """

    # Reset the mock
    format_answer_mock.reset_mock()
    format_answer_mock.side_effect = [
        OutputParserException(FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE),
        AgentAction(thought="", tool="search", tool_input="what is the temperature in SF?", text=""),
    ]

    # Process the response
    result = executor._process_llm_response(output_with_observation)

    # Verify that the result is an AgentAction
    assert isinstance(result, AgentAction)
    assert result.tool == "search"
    assert result.tool_input == "what is the temperature in SF?"
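Assuming the new file is collected by the repository's pytest configuration, the test above can be run on its own with pytest's standard name filter, for example: pytest -k test_process_llm_response_with_action_and_final_answer (the file's exact path is not shown in this view).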