Compare commits

..

1 Commits

Author SHA1 Message Date
Devin AI
8c0e7d235e Fix Deepseek-v3-250324 empty responses issue (#2657)
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-22 11:18:44 +00:00
6 changed files with 50 additions and 28 deletions

View File

@@ -790,9 +790,6 @@ Visualizing your AI workflows can provide valuable insights into the structure a
Plots in CrewAI are graphical representations of your AI workflows. They display the various tasks, their connections, and the flow of data between them. This visualization helps in understanding the sequence of operations, identifying bottlenecks, and ensuring that the workflow logic aligns with your expectations.
![Example of a Flow Plot](/images/flow_plot_example.png)
*An example visualization of a simple flow with start method, sequential steps, and directional execution.*
### How to Generate a Plot
CrewAI provides two convenient methods to generate plots of your flows:

View File

@@ -8,9 +8,6 @@
"dark": "#C94C3C"
},
"favicon": "favicon.svg",
"contextual": {
"options": ["copy", "view", "chatgpt", "claude"]
},
"navigation": {
"tabs": [
{
@@ -247,12 +244,7 @@
"prompt": "Search CrewAI docs"
},
"seo": {
"indexing": "all"
},
"errors": {
"404": {
"redirect": true
}
"indexing": "navigable"
},
"footer": {
"socials": {

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

View File

@@ -117,9 +117,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
published_handle = publish_response.json()["handle"]
console.print(
f"Successfully published `{published_handle}` ({project_version}).\n\n"
+ "⚠️ Security checks are running in the background. Your tool will be available once these are complete.\n"
+ f"You can monitor the status or access your tool here:\nhttps://app.crewai.com/crewai_plus/tools/{published_handle}",
f"Successfully published {published_handle} ({project_version}).\nInstall it in other projects with crewai tool install {published_handle}",
style="bold green",
)
@@ -155,12 +153,8 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
login_response_json = login_response.json()
settings = Settings()
settings.tool_repository_username = login_response_json["credential"][
"username"
]
settings.tool_repository_password = login_response_json["credential"][
"password"
]
settings.tool_repository_username = login_response_json["credential"]["username"]
settings.tool_repository_password = login_response_json["credential"]["password"]
settings.dump()
console.print(
@@ -185,7 +179,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
capture_output=False,
env=self._build_env_with_credentials(repository_handle),
text=True,
check=True,
check=True
)
if add_package_result.stderr:
@@ -210,11 +204,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
settings = Settings()
env = os.environ.copy()
env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(
settings.tool_repository_username or ""
)
env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(
settings.tool_repository_password or ""
)
env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(settings.tool_repository_username or "")
env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(settings.tool_repository_password or "")
return env

View File

@@ -91,6 +91,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
"gemini-1.5-flash-8b": 1048576,
# deepseek
"deepseek-chat": 128000,
"deepseek-v3-250324": 128000,
# groq
"gemma2-9b-it": 8192,
"gemma-7b-it": 8192,
@@ -930,6 +931,17 @@ class LLM(BaseLLM):
) # Create a copy to avoid modifying the original
messages.append({"role": "user", "content": "Please continue."})
return messages
# Handle Deepseek-v3-250324 model - it also requires the last message to have a role of 'user'
if "deepseek-v3-250324" in self.model.lower():
# Check if the last message has a role of 'assistant'
if messages and messages[-1]["role"] == "assistant":
# Add a dummy user message to ensure the last message has a role of 'user'
messages = (
messages.copy()
) # Create a copy to avoid modifying the original
messages.append({"role": "user", "content": "Please continue."})
return messages
# Handle Anthropic models
if not self.is_anthropic:

View File

@@ -407,6 +407,37 @@ def test_anthropic_message_formatting(anthropic_llm, system_message, user_messag
assert formatted[0] == system_message
@pytest.fixture
def deepseek_llm():
    """Provide an ``LLM`` instance configured for the deepseek-v3-250324 model."""
    model_name = "deepseek-v3-250324"
    return LLM(model=model_name)
def test_deepseek_message_formatting(deepseek_llm):
    """Verify provider-side message formatting for deepseek-v3-250324.

    The formatter must guarantee the conversation ends with a ``user`` turn:
    when the last message is from the assistant, a trailing
    ``"Please continue."`` user message is appended; otherwise the message
    list passes through unchanged.
    """
    # Case 1: conversation ends on an assistant turn -> a user turn is appended.
    ending_with_assistant = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi there"},
    ]
    result = deepseek_llm._format_messages_for_provider(ending_with_assistant)
    assert len(result) == 3
    assert [msg["role"] for msg in result] == ["user", "assistant", "user"]
    assert result[-1]["content"] == "Please continue."

    # Case 2: conversation already ends on a user turn -> left untouched.
    ending_with_user = [
        {"role": "assistant", "content": "Hi there"},
        {"role": "user", "content": "Hello"},
    ]
    result = deepseek_llm._format_messages_for_provider(ending_with_user)
    assert len(result) == 2
    assert [msg["role"] for msg in result] == ["assistant", "user"]
def test_deepseek_r1_with_open_router():
if not os.getenv("OPEN_ROUTER_API_KEY"):
pytest.skip("OPEN_ROUTER_API_KEY not set; skipping test.")