Compare commits

..

2 Commits

Author SHA1 Message Date
Devin AI
5320960f3f docs: add flow plot visualization example to documentation (#2668)
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-22 19:07:38 +00:00
Tony Kipkemboi
16eb4df556 docs: update docs.json with contextual options, SEO, and 404 redirect (#2654)
* docs: 0.114.0 release notes, navigation restructure, new guides, deploy video, and cleanup

- Add v0.114.0 release notes with highlights image and doc links
- Restructure docs navigation (Strategy group, Releases tab, navbar links)
- Update quickstart with deployment video and clearer instructions
- Add/rename guides (Custom Manager Agent, Custom LLM)
- Remove legacy concept/tool docs
- Add new images and tool docs
- Minor formatting and content improvements throughout

* docs: update docs.json with contextual options, SEO indexing, and 404 redirect settings
2025-04-22 09:52:27 -07:00
6 changed files with 26 additions and 99 deletions

View File

@@ -790,6 +790,9 @@ Visualizing your AI workflows can provide valuable insights into the structure a
Plots in CrewAI are graphical representations of your AI workflows. They display the various tasks, their connections, and the flow of data between them. This visualization helps in understanding the sequence of operations, identifying bottlenecks, and ensuring that the workflow logic aligns with your expectations.
![Example of a Flow Plot](/images/flow_plot_example.png)
*An example visualization of a simple flow with start method, sequential steps, and directional execution.*
### How to Generate a Plot
CrewAI provides two convenient methods to generate plots of your flows:

View File

@@ -8,6 +8,9 @@
"dark": "#C94C3C"
},
"favicon": "favicon.svg",
"contextual": {
"options": ["copy", "view", "chatgpt", "claude"]
},
"navigation": {
"tabs": [
{
@@ -244,7 +247,12 @@
"prompt": "Search CrewAI docs"
},
"seo": {
"indexing": "navigable"
"indexing": "all"
},
"errors": {
"404": {
"redirect": true
}
},
"footer": {
"socials": {

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

View File

@@ -75,7 +75,6 @@ class ToolUsage:
agent: Optional[Union["BaseAgent", "LiteAgent"]] = None,
action: Any = None,
fingerprint_context: Optional[Dict[str, str]] = None,
original_tools: List[Any] = [],
) -> None:
self._i18n: I18N = agent.i18n if agent else I18N()
self._printer: Printer = Printer()
@@ -87,7 +86,6 @@ class ToolUsage:
self.tools_description = render_text_description_and_args(tools)
self.tools_names = get_tool_names(tools)
self.tools_handler = tools_handler
self.original_tools = original_tools
self.tools = tools
self.task = task
self.action = action
@@ -193,16 +191,13 @@ class ToolUsage:
) # type: ignore
from_cache = result is not None
original_tool = None
if hasattr(self, 'original_tools') and self.original_tools:
original_tool = next(
(ot for ot in self.original_tools if ot.name == tool.name),
None
)
available_tool = next(
(at for at in self.tools if at.name == tool.name),
None
(
available_tool
for available_tool in self.tools
if available_tool.name == tool.name
),
None,
)
if result is None:
@@ -264,11 +259,10 @@ class ToolUsage:
if self.tools_handler:
should_cache = True
if original_tool and hasattr(original_tool, "cache_function") and original_tool.cache_function:
should_cache = original_tool.cache_function(
calling.arguments, result
)
elif available_tool and hasattr(available_tool, "cache_function") and available_tool.cache_function:
if (
hasattr(available_tool, "cache_function")
and available_tool.cache_function # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
):
should_cache = available_tool.cache_function( # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
calling.arguments, result
)
@@ -296,10 +290,10 @@ class ToolUsage:
result=result,
)
if original_tool and hasattr(original_tool, "result_as_answer") and original_tool.result_as_answer:
result_as_answer = original_tool.result_as_answer
data["result_as_answer"] = result_as_answer
elif available_tool and hasattr(available_tool, "result_as_answer") and available_tool.result_as_answer:
if (
hasattr(available_tool, "result_as_answer")
and available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
):
result_as_answer = available_tool.result_as_answer # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
data["result_as_answer"] = result_as_answer # type: ignore

View File

@@ -60,7 +60,6 @@ def execute_tool_and_check_finality(
task=task,
agent=agent,
action=agent_action,
original_tools=tools, # Pass original tools to ensure custom tools work
)
# Parse tool calling

View File

@@ -1,77 +0,0 @@
from unittest.mock import MagicMock
import pytest
from pydantic import BaseModel, Field
from crewai import Agent, Task
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import AgentAction
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.utilities.i18n import I18N
from crewai.utilities.tool_utils import execute_tool_and_check_finality
class TestToolInput(BaseModel):
    """Input schema consumed by ``TestCustomTool`` through its ``args_schema``."""
    # Single required string argument; its value is echoed back by the tool.
    test_param: str = Field(..., description="A test parameter")
class TestCustomTool(BaseTool):
    """Minimal custom tool used to verify that user-defined tools are invoked."""
    # ``name`` must match the AgentAction.tool string used in the test below.
    name: str = "Test Custom Tool"
    description: str = "A test tool to verify custom tool invocation"
    # Pydantic schema validating the tool's keyword arguments.
    args_schema: type[BaseModel] = TestToolInput
    def _run(self, test_param: str) -> str:
        # Echo the argument so the caller can assert the tool actually ran.
        return f"Tool executed with param: {test_param}"
def test_custom_tool_invocation():
    """End-to-end check that a custom ``BaseTool`` is executed by the tool pipeline.

    Builds an executor around a single custom tool, then drives
    ``execute_tool_and_check_finality`` with an action targeting that tool and
    asserts the tool's echoed output comes back in the result.
    """
    tool = TestCustomTool()

    # Collaborator doubles — only ``key`` and ``role`` are read off the agent.
    agent_double = MagicMock()
    agent_double.key = "test_agent"
    agent_double.role = "test_role"
    task_double = MagicMock()
    llm_double = MagicMock()
    crew_double = MagicMock()
    handler = ToolsHandler()

    # Constructed for parity with real usage; the result below comes from the
    # free function, not from this executor instance.
    CrewAgentExecutor(
        llm=llm_double,
        task=task_double,
        crew=crew_double,
        agent=agent_double,
        prompt={},
        max_iter=5,
        tools=[tool],
        tools_names="Test Custom Tool",
        stop_words=[],
        tools_description="A test tool to verify custom tool invocation",
        tools_handler=handler,
        original_tools=[tool],
    )

    chosen_action = AgentAction(
        tool="Test Custom Tool",
        tool_input={"test_param": "test_value"},
        thought="I'll use the custom tool",
        text="I'll use the Test Custom Tool to get a result",
    )

    outcome = execute_tool_and_check_finality(
        agent_action=chosen_action,
        tools=[tool],
        i18n=I18N(),
        agent_key=agent_double.key,
        agent_role=agent_double.role,
        tools_handler=handler,
        task=task_double,
        agent=agent_double,
        function_calling_llm=llm_double,
    )

    assert "Tool executed with param: test_value" in outcome.result
    assert outcome.result_as_answer is False