diff --git a/lib/crewai/pyproject.toml b/lib/crewai/pyproject.toml index cd18a9124..4d45e8950 100644 --- a/lib/crewai/pyproject.toml +++ b/lib/crewai/pyproject.toml @@ -23,7 +23,6 @@ dependencies = [ "chromadb~=1.1.0", "tokenizers>=0.20.3", "openpyxl>=3.1.5", - "pyvis>=0.3.2", # Authentication and Security "python-dotenv>=1.1.1", "pyjwt>=2.9.0", diff --git a/lib/crewai/src/crewai/events/event_listener.py b/lib/crewai/src/crewai/events/event_listener.py index 8140ccc2b..69e1891c0 100644 --- a/lib/crewai/src/crewai/events/event_listener.py +++ b/lib/crewai/src/crewai/events/event_listener.py @@ -88,6 +88,7 @@ class EventListener(BaseEventListener): text_stream = StringIO() knowledge_retrieval_in_progress = False knowledge_query_in_progress = False + method_branches: dict[str, Any] = Field(default_factory=dict) def __new__(cls): if cls._instance is None: @@ -101,6 +102,7 @@ class EventListener(BaseEventListener): self._telemetry = Telemetry() self._telemetry.set_tracer() self.execution_spans = {} + self.method_branches = {} self._initialized = True self.formatter = ConsoleFormatter(verbose=True) @@ -263,7 +265,8 @@ class EventListener(BaseEventListener): @crewai_event_bus.on(FlowCreatedEvent) def on_flow_created(source, event: FlowCreatedEvent): self._telemetry.flow_creation_span(event.flow_name) - self.formatter.create_flow_tree(event.flow_name, str(source.flow_id)) + tree = self.formatter.create_flow_tree(event.flow_name, str(source.flow_id)) + self.formatter.current_flow_tree = tree @crewai_event_bus.on(FlowStartedEvent) def on_flow_started(source, event: FlowStartedEvent): @@ -280,30 +283,36 @@ class EventListener(BaseEventListener): @crewai_event_bus.on(MethodExecutionStartedEvent) def on_method_execution_started(source, event: MethodExecutionStartedEvent): - self.formatter.update_method_status( - self.formatter.current_method_branch, + method_branch = self.method_branches.get(event.method_name) + updated_branch = self.formatter.update_method_status( + method_branch, self.formatter.current_flow_tree, event.method_name, "running", ) + self.method_branches[event.method_name] = updated_branch @crewai_event_bus.on(MethodExecutionFinishedEvent) def on_method_execution_finished(source, event: MethodExecutionFinishedEvent): - self.formatter.update_method_status( - self.formatter.current_method_branch, + method_branch = self.method_branches.get(event.method_name) + updated_branch = self.formatter.update_method_status( + method_branch, self.formatter.current_flow_tree, event.method_name, "completed", ) + self.method_branches[event.method_name] = updated_branch @crewai_event_bus.on(MethodExecutionFailedEvent) def on_method_execution_failed(source, event: MethodExecutionFailedEvent): - self.formatter.update_method_status( - self.formatter.current_method_branch, + method_branch = self.method_branches.get(event.method_name) + updated_branch = self.formatter.update_method_status( + method_branch, self.formatter.current_flow_tree, event.method_name, "failed", ) + self.method_branches[event.method_name] = updated_branch # ----------- TOOL USAGE EVENTS ----------- diff --git a/lib/crewai/src/crewai/events/utils/console_formatter.py b/lib/crewai/src/crewai/events/utils/console_formatter.py index c76853d02..6a04d1fa6 100644 --- a/lib/crewai/src/crewai/events/utils/console_formatter.py +++ b/lib/crewai/src/crewai/events/utils/console_formatter.py @@ -357,7 +357,14 @@ class ConsoleFormatter: return flow_tree def start_flow(self, flow_name: str, flow_id: str) -> Tree | None: - """Initialize a flow execution 
tree.""" + """Initialize or update a flow execution tree.""" + if self.current_flow_tree is not None: + for child in self.current_flow_tree.children: + if "Starting Flow" in str(child.label): + child.label = Text("🚀 Flow Started", style="green") + break + return self.current_flow_tree + flow_tree = Tree("") flow_label = Text() flow_label.append("🌊 Flow: ", style="blue bold") @@ -436,27 +443,38 @@ class ConsoleFormatter: prefix, style = "🔄 Running:", "yellow" elif status == "completed": prefix, style = "✅ Completed:", "green" - # Update initialization node when a method completes successfully for child in flow_tree.children: if "Starting Flow" in str(child.label): child.label = Text("Flow Method Step", style="white") break else: prefix, style = "❌ Failed:", "red" - # Update initialization node on failure for child in flow_tree.children: if "Starting Flow" in str(child.label): child.label = Text("❌ Flow Step Failed", style="red") break - if not method_branch: - # Find or create method branch - for branch in flow_tree.children: - if method_name in str(branch.label): - method_branch = branch - break - if not method_branch: - method_branch = flow_tree.add("") + if method_branch is not None: + if method_branch in flow_tree.children: + method_branch.label = Text(prefix, style=f"{style} bold") + Text( + f" {method_name}", style=style + ) + self.print(flow_tree) + self.print() + return method_branch + + for branch in flow_tree.children: + label_str = str(branch.label) + if f" {method_name}" in label_str and ( + "Running:" in label_str + or "Completed:" in label_str + or "Failed:" in label_str + ): + method_branch = branch + break + + if method_branch is None: + method_branch = flow_tree.add("") method_branch.label = Text(prefix, style=f"{style} bold") + Text( f" {method_name}", style=style @@ -464,6 +482,7 @@ class ConsoleFormatter: self.print(flow_tree) self.print() + return method_branch def get_llm_tree(self, tool_name: str): diff --git a/lib/crewai/src/crewai/flow/__init__.py b/lib/crewai/src/crewai/flow/__init__.py index 8e055d939..bda0186c7 100644 --- a/lib/crewai/src/crewai/flow/__init__.py +++ b/lib/crewai/src/crewai/flow/__init__.py @@ -1,5 +1,25 @@ +from crewai.flow.visualization import ( + FlowStructure, + build_flow_structure, + print_structure_summary, + structure_to_dict, + visualize_flow_structure, +) from crewai.flow.flow import Flow, and_, listen, or_, router, start from crewai.flow.persistence import persist -__all__ = ["Flow", "and_", "listen", "or_", "persist", "router", "start"] +__all__ = [ + "Flow", + "FlowStructure", + "and_", + "build_flow_structure", + "listen", + "or_", + "persist", + "print_structure_summary", + "router", + "start", + "structure_to_dict", + "visualize_flow_structure", +] diff --git a/lib/crewai/src/crewai/flow/assets/crewai_flow_visual_template.html b/lib/crewai/src/crewai/flow/assets/crewai_flow_visual_template.html deleted file mode 100644 index f175ef1a7..000000000 --- a/lib/crewai/src/crewai/flow/assets/crewai_flow_visual_template.html +++ /dev/null @@ -1,93 +0,0 @@ - - - - - {{ title }} - - - - - -
-    <img src="data:image/svg+xml;base64,{{ logo_svg_base64 }}" alt="CrewAI" />
-    <!-- LEGEND_ITEMS -->
- {{ network_content }} - - diff --git a/lib/crewai/src/crewai/flow/assets/crewai_logo.svg b/lib/crewai/src/crewai/flow/assets/crewai_logo.svg deleted file mode 100644 index 1668a48e5..000000000 --- a/lib/crewai/src/crewai/flow/assets/crewai_logo.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff --git a/lib/crewai/src/crewai/flow/constants.py b/lib/crewai/src/crewai/flow/constants.py new file mode 100644 index 000000000..c8720d529 --- /dev/null +++ b/lib/crewai/src/crewai/flow/constants.py @@ -0,0 +1,4 @@ +from typing import Final, Literal + +AND_CONDITION: Final[Literal["AND"]] = "AND" +OR_CONDITION: Final[Literal["OR"]] = "OR" diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index 44dcd0ec9..5bb4980a4 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -1,3 +1,9 @@ +"""Core flow execution framework with decorators and state management. + +This module provides the Flow class and decorators (@start, @listen, @router) +for building event-driven workflows with conditional execution and routing. +""" + from __future__ import annotations import asyncio @@ -9,6 +15,7 @@ import logging from typing import ( Any, ClassVar, + Final, Generic, Literal, ParamSpec, @@ -38,7 +45,7 @@ from crewai.events.types.flow_events import ( MethodExecutionFinishedEvent, MethodExecutionStartedEvent, ) -from crewai.flow.flow_visualizer import plot_flow +from crewai.flow.visualization import build_flow_structure, render_interactive from crewai.flow.flow_wrappers import ( FlowCondition, FlowConditions, @@ -58,7 +65,11 @@ from crewai.flow.utils import ( is_flow_method_callable, is_flow_method_name, is_simple_flow_condition, + _extract_all_methods, + _extract_all_methods_recursive, + _normalize_condition, ) +from crewai.flow.constants import AND_CONDITION, OR_CONDITION from crewai.utilities.printer import Printer, PrinterColor @@ -74,95 +85,63 @@ class FlowState(BaseModel): ) -# type variables with explicit bounds -T = TypeVar("T", bound=dict[str, Any] | BaseModel) # Generic flow state type parameter -StateT = TypeVar( - "StateT", bound=dict[str, Any] | BaseModel -) # State validation type parameter -P = ParamSpec("P") # ParamSpec for preserving function signatures in decorators -R = TypeVar("R") # Generic return type for decorated methods -F = TypeVar("F", bound=Callable[..., Any]) # Function type for decorator preservation - - -def ensure_state_type(state: Any, expected_type: type[StateT]) -> StateT: - """Ensure state matches expected type with proper validation. 
- - Args: - state: State instance to validate - expected_type: Expected type for the state - - Returns: - Validated state instance - - Raises: - TypeError: If state doesn't match expected type - ValueError: If state validation fails - """ - if expected_type is dict: - if not isinstance(state, dict): - raise TypeError(f"Expected dict, got {type(state).__name__}") - return cast(StateT, state) - if isinstance(expected_type, type) and issubclass(expected_type, BaseModel): - if not isinstance(state, expected_type): - raise TypeError( - f"Expected {expected_type.__name__}, got {type(state).__name__}" - ) - return state - raise TypeError(f"Invalid expected_type: {expected_type}") +T = TypeVar("T", bound=dict[str, Any] | BaseModel) +P = ParamSpec("P") +R = TypeVar("R") +F = TypeVar("F", bound=Callable[..., Any]) def start( condition: str | FlowCondition | Callable[..., Any] | None = None, ) -> Callable[[Callable[P, R]], StartMethod[P, R]]: - """ - Marks a method as a flow's starting point. + """Marks a method as a flow's starting point. This decorator designates a method as an entry point for the flow execution. It can optionally specify conditions that trigger the start based on other method executions. - Parameters - ---------- - condition : Optional[Union[str, FlowCondition, Callable[..., Any]]], optional - Defines when the start method should execute. Can be: - - str: Name of a method that triggers this start - - FlowCondition: Result from or_() or and_(), including nested conditions - - Callable[..., Any]: A method reference that triggers this start - Default is None, meaning unconditional start. + Args: + condition: Defines when the start method should execute. Can be: + - str: Name of a method that triggers this start + - FlowCondition: Result from or_() or and_(), including nested conditions + - Callable[..., Any]: A method reference that triggers this start + Default is None, meaning unconditional start. - Returns - ------- - Callable[[Callable[P, R]], StartMethod[P, R]] - A decorator function that wraps the method as a flow start point - and preserves its signature. + Returns: + A decorator function that wraps the method as a flow start point and preserves its signature. - Raises - ------ - ValueError - If the condition format is invalid. + Raises: + ValueError: If the condition format is invalid. - Examples - -------- - >>> @start() # Unconditional start - >>> def begin_flow(self): - ... pass + Examples: + >>> @start() # Unconditional start + >>> def begin_flow(self): + ... pass - >>> @start("method_name") # Start after specific method - >>> def conditional_start(self): - ... pass + >>> @start("method_name") # Start after specific method + >>> def conditional_start(self): + ... pass - >>> @start(and_("method1", "method2")) # Start after multiple methods - >>> def complex_start(self): - ... pass + >>> @start(and_("method1", "method2")) # Start after multiple methods + >>> def complex_start(self): + ... pass """ def decorator(func: Callable[P, R]) -> StartMethod[P, R]: + """Decorator that wraps a function as a start method. + + Args: + func: The function to wrap as a start method. + + Returns: + A StartMethod wrapper around the function. 
+ """ wrapper = StartMethod(func) if condition is not None: if is_flow_method_name(condition): wrapper.__trigger_methods__ = [condition] - wrapper.__condition_type__ = "OR" + wrapper.__condition_type__ = OR_CONDITION elif is_flow_condition_dict(condition): if "conditions" in condition: wrapper.__trigger_condition__ = condition @@ -177,7 +156,7 @@ def start( ) elif is_flow_method_callable(condition): wrapper.__trigger_methods__ = [condition.__name__] - wrapper.__condition_type__ = "OR" + wrapper.__condition_type__ = OR_CONDITION else: raise ValueError( "Condition must be a method, string, or a result of or_() or and_()" @@ -190,49 +169,45 @@ def start( def listen( condition: str | FlowCondition | Callable[..., Any], ) -> Callable[[Callable[P, R]], ListenMethod[P, R]]: - """ - Creates a listener that executes when specified conditions are met. + """Creates a listener that executes when specified conditions are met. This decorator sets up a method to execute in response to other method executions in the flow. It supports both simple and complex triggering conditions. - Parameters - ---------- - condition : Union[str, FlowCondition, Callable[..., Any]] - Specifies when the listener should execute. Can be: - - str: Name of a method that triggers this listener - - FlowCondition: Result from or_() or and_(), including nested conditions - - Callable[..., Any]: A method reference that triggers this listener + Args: + condition: Specifies when the listener should execute. - Returns - ------- - Callable[[Callable[P, R]], ListenMethod[P, R]] - A decorator function that wraps the method as a listener - and preserves its signature. + Returns: + A decorator function that wraps the method as a flow listener and preserves its signature. - Raises - ------ - ValueError - If the condition format is invalid. + Raises: + ValueError: If the condition format is invalid. - Examples - -------- - >>> @listen("process_data") # Listen to single method - >>> def handle_processed_data(self): - ... pass + Examples: + >>> @listen("process_data") + >>> def handle_processed_data(self): + ... pass - >>> @listen(or_("success", "failure")) # Listen to multiple methods - >>> def handle_completion(self): - ... pass + >>> @listen("method_name") + >>> def handle_completion(self): + ... pass """ def decorator(func: Callable[P, R]) -> ListenMethod[P, R]: + """Decorator that wraps a function as a listener method. + + Args: + func: The function to wrap as a listener method. + + Returns: + A ListenMethod wrapper around the function. + """ wrapper = ListenMethod(func) if is_flow_method_name(condition): wrapper.__trigger_methods__ = [condition] - wrapper.__condition_type__ = "OR" + wrapper.__condition_type__ = OR_CONDITION elif is_flow_condition_dict(condition): if "conditions" in condition: wrapper.__trigger_condition__ = condition @@ -247,7 +222,7 @@ def listen( ) elif is_flow_method_callable(condition): wrapper.__trigger_methods__ = [condition.__name__] - wrapper.__condition_type__ = "OR" + wrapper.__condition_type__ = OR_CONDITION else: raise ValueError( "Condition must be a method, string, or a result of or_() or and_()" @@ -260,54 +235,53 @@ def listen( def router( condition: str | FlowCondition | Callable[..., Any], ) -> Callable[[Callable[P, R]], RouterMethod[P, R]]: - """ - Creates a routing method that directs flow execution based on conditions. + """Creates a routing method that directs flow execution based on conditions. 
This decorator marks a method as a router, which can dynamically determine the next steps in the flow based on its return value. Routers are triggered by specified conditions and can return constants that determine which path the flow should take. - Parameters - ---------- - condition : Union[str, FlowCondition, Callable[..., Any]] - Specifies when the router should execute. Can be: - - str: Name of a method that triggers this router - - FlowCondition: Result from or_() or and_(), including nested conditions - - Callable[..., Any]: A method reference that triggers this router + Args: + condition: Specifies when the router should execute. Can be: + - str: Name of a method that triggers this router + - FlowCondition: Result from or_() or and_(), including nested conditions + - Callable[..., Any]: A method reference that triggers this router - Returns - ------- - Callable[[Callable[P, R]], RouterMethod[P, R]] - A decorator function that wraps the method as a router - and preserves its signature. + Returns: + A decorator function that wraps the method as a router and preserves its signature. - Raises - ------ - ValueError - If the condition format is invalid. + Raises: + ValueError: If the condition format is invalid. - Examples - -------- - >>> @router("check_status") - >>> def route_based_on_status(self): - ... if self.state.status == "success": - ... return SUCCESS - ... return FAILURE + Examples: + >>> @router("check_status") + >>> def route_based_on_status(self): + ... if self.state.status == "success": + ... return "SUCCESS" + ... return "FAILURE" - >>> @router(and_("validate", "process")) - >>> def complex_routing(self): - ... if all([self.state.valid, self.state.processed]): - ... return CONTINUE - ... return STOP + >>> @router(and_("validate", "process")) + >>> def complex_routing(self): + ... if all([self.state.valid, self.state.processed]): + ... return "CONTINUE" + ... return "STOP" """ def decorator(func: Callable[P, R]) -> RouterMethod[P, R]: + """Decorator that wraps a function as a router method. + + Args: + func: The function to wrap as a router method. + + Returns: + A RouterMethod wrapper around the function. + """ wrapper = RouterMethod(func) if is_flow_method_name(condition): wrapper.__trigger_methods__ = [condition] - wrapper.__condition_type__ = "OR" + wrapper.__condition_type__ = OR_CONDITION elif is_flow_condition_dict(condition): if "conditions" in condition: wrapper.__trigger_condition__ = condition @@ -322,7 +296,7 @@ def router( ) elif is_flow_method_callable(condition): wrapper.__trigger_methods__ = [condition.__name__] - wrapper.__condition_type__ = "OR" + wrapper.__condition_type__ = OR_CONDITION else: raise ValueError( "Condition must be a method, string, or a result of or_() or and_()" @@ -333,42 +307,29 @@ def router( def or_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition: - """ - Combines multiple conditions with OR logic for flow control. + """Combines multiple conditions with OR logic for flow control. Creates a condition that is satisfied when any of the specified conditions are met. This is used with @start, @listen, or @router decorators to create complex triggering conditions. 
- Parameters - ---------- - *conditions : Union[str, dict[str, Any], Callable[..., Any]] - Variable number of conditions that can be: - - str: Method names - - dict[str, Any]: Existing condition dictionaries (nested conditions) - - Callable[..., Any]: Method references + Args: + conditions: Variable number of conditions that can be method names, existing condition dictionaries, or method references. - Returns - ------- - dict[str, Any] - A condition dictionary with format: - {"type": "OR", "conditions": list_of_conditions} - where each condition can be a string (method name) or a nested dict + Returns: + A condition dictionary with format {"type": "OR", "conditions": list_of_conditions} where each condition can be a string (method name) or a nested dict - Raises - ------ - ValueError - If any condition is invalid. + Raises: + ValueError: If condition format is invalid. - Examples - -------- - >>> @listen(or_("success", "timeout")) - >>> def handle_completion(self): - ... pass + Examples: + >>> @listen(or_("success", "timeout")) + >>> def handle_completion(self): + ... pass - >>> @listen(or_(and_("step1", "step2"), "step3")) - >>> def handle_nested(self): - ... pass + >>> @listen(or_(and_("step1", "step2"), "step3")) + >>> def handle_nested(self): + ... pass """ processed_conditions: FlowConditions = [] for condition in conditions: @@ -378,46 +339,34 @@ def or_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition: processed_conditions.append(condition.__name__) else: raise ValueError("Invalid condition in or_()") - return {"type": "OR", "conditions": processed_conditions} + return {"type": OR_CONDITION, "conditions": processed_conditions} def and_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition: - """ - Combines multiple conditions with AND logic for flow control. + """Combines multiple conditions with AND logic for flow control. Creates a condition that is satisfied only when all specified conditions are met. This is used with @start, @listen, or @router decorators to create complex triggering conditions. - Parameters - ---------- - *conditions : Union[str, dict[str, Any], Callable[..., Any]] - Variable number of conditions that can be: - - str: Method names - - dict[str, Any]: Existing condition dictionaries (nested conditions) - - Callable[..., Any]: Method references + Args: + *conditions: Variable number of conditions that can be method names, existing condition dictionaries, or method references. - Returns - ------- - dict[str, Any] - A condition dictionary with format: - {"type": "AND", "conditions": list_of_conditions} + Returns: + A condition dictionary with format {"type": "AND", "conditions": list_of_conditions} where each condition can be a string (method name) or a nested dict - Raises - ------ - ValueError - If any condition is invalid. + Raises: + ValueError: If any condition is invalid. - Examples - -------- - >>> @listen(and_("validated", "processed")) - >>> def handle_complete_data(self): - ... pass + Examples: + >>> @listen(and_("validated", "processed")) + >>> def handle_complete_data(self): + ... pass - >>> @listen(and_(or_("step1", "step2"), "step3")) - >>> def handle_nested(self): - ... pass + >>> @listen(and_(or_("step1", "step2"), "step3")) + >>> def handle_nested(self): + ... 
pass """ processed_conditions: FlowConditions = [] for condition in conditions: @@ -427,59 +376,7 @@ def and_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition processed_conditions.append(condition.__name__) else: raise ValueError("Invalid condition in and_()") - return {"type": "AND", "conditions": processed_conditions} - - -def _normalize_condition( - condition: FlowConditions | FlowCondition | FlowMethodName, -) -> FlowCondition: - """Normalize a condition to standard format with 'conditions' key. - - Args: - condition: Can be a string (method name), dict (condition), or list - - Returns: - Normalized dict with 'type' and 'conditions' keys - """ - if is_flow_method_name(condition): - return {"type": "OR", "conditions": [condition]} - if is_flow_condition_dict(condition): - if "conditions" in condition: - return condition - if "methods" in condition: - return {"type": condition["type"], "conditions": condition["methods"]} - return condition - if is_flow_condition_list(condition): - return {"type": "OR", "conditions": condition} - - raise ValueError(f"Cannot normalize condition: {condition}") - - -def _extract_all_methods( - condition: str | FlowCondition | dict[str, Any] | list[Any], -) -> list[FlowMethodName]: - """Extract all method names from a condition (including nested). - - Args: - condition: Can be a string, dict, or list - - Returns: - List of all method names in the condition tree - """ - if is_flow_method_name(condition): - return [condition] - if is_flow_condition_dict(condition): - normalized = _normalize_condition(condition) - methods = [] - for sub_cond in normalized.get("conditions", []): - methods.extend(_extract_all_methods(sub_cond)) - return methods - if isinstance(condition, list): - methods = [] - for item in condition: - methods.extend(_extract_all_methods(item)) - return methods - return [] + return {"type": AND_CONDITION, "conditions": processed_conditions} class FlowMeta(type): @@ -515,7 +412,9 @@ class FlowMeta(type): and attr_value.__trigger_methods__ is not None ): methods = attr_value.__trigger_methods__ - condition_type = getattr(attr_value, "__condition_type__", "OR") + condition_type = getattr( + attr_value, "__condition_type__", OR_CONDITION + ) if ( hasattr(attr_value, "__trigger_condition__") and attr_value.__trigger_condition__ is not None @@ -556,7 +455,7 @@ class Flow(Generic[T], metaclass=FlowMeta): name: str | None = None tracing: bool | None = False - def __class_getitem__(cls: type[Flow[StateT]], item: type[T]) -> type[Flow[StateT]]: + def __class_getitem__(cls: type[Flow[T]], item: type[T]) -> type[Flow[T]]: class _FlowGeneric(cls): # type: ignore _initial_state_t = item @@ -1037,24 +936,20 @@ class Flow(Generic[T], metaclass=FlowMeta): detach(flow_token) async def _execute_start_method(self, start_method_name: FlowMethodName) -> None: - """ - Executes a flow's start method and its triggered listeners. + """Executes a flow's start method and its triggered listeners. This internal method handles the execution of methods marked with @start decorator and manages the subsequent chain of listener executions. - Parameters - ---------- - start_method_name : str - The name of the start method to execute. + Args: + start_method_name: The name of the start method to execute. 
- Notes - ----- - - Executes the start method and captures its result - - Triggers execution of any listeners waiting on this start method - - Part of the flow's initialization sequence - - Skips execution if method was already completed (e.g., after reload) - - Automatically injects crewai_trigger_payload if available in flow inputs + Note: + - Executes the start method and captures its result + - Triggers execution of any listeners waiting on this start method + - Part of the flow's initialization sequence + - Skips execution if method was already completed (e.g., after reload) + - Automatically injects crewai_trigger_payload if available in flow inputs """ if start_method_name in self._completed_methods: if self._is_execution_resuming: @@ -1174,27 +1069,21 @@ class Flow(Generic[T], metaclass=FlowMeta): async def _execute_listeners( self, trigger_method: FlowMethodName, result: Any ) -> None: - """ - Executes all listeners and routers triggered by a method completion. + """Executes all listeners and routers triggered by a method completion. This internal method manages the execution flow by: 1. First executing all triggered routers sequentially 2. Then executing all triggered listeners in parallel - Parameters - ---------- - trigger_method : str - The name of the method that triggered these listeners. - result : Any - The result from the triggering method, passed to listeners - that accept parameters. + Args: + trigger_method: The name of the method that triggered these listeners. + result: The result from the triggering method, passed to listeners that accept parameters. - Notes - ----- - - Routers are executed sequentially to maintain flow control - - Each router's result becomes a new trigger_method - - Normal listeners are executed in parallel for efficiency - - Listeners can receive the trigger method's result as a parameter + Note: + - Routers are executed sequentially to maintain flow control + - Each router's result becomes a new trigger_method + - Normal listeners are executed in parallel for efficiency + - Listeners can receive the trigger method's result as a parameter """ # First, handle routers repeatedly until no router triggers anymore router_results = [] @@ -1281,16 +1170,16 @@ class Flow(Generic[T], metaclass=FlowMeta): if is_flow_condition_dict(condition): normalized = _normalize_condition(condition) - cond_type = normalized.get("type", "OR") + cond_type = normalized.get("type", OR_CONDITION) sub_conditions = normalized.get("conditions", []) - if cond_type == "OR": + if cond_type == OR_CONDITION: return any( self._evaluate_condition(sub_cond, trigger_method, listener_name) for sub_cond in sub_conditions ) - if cond_type == "AND": + if cond_type == AND_CONDITION: pending_key = PendingListenerKey(f"{listener_name}:{id(condition)}") if pending_key not in self._pending_and_listeners: @@ -1300,7 +1189,20 @@ class Flow(Generic[T], metaclass=FlowMeta): if trigger_method in self._pending_and_listeners[pending_key]: self._pending_and_listeners[pending_key].discard(trigger_method) - if not self._pending_and_listeners[pending_key]: + direct_methods_satisfied = not self._pending_and_listeners[pending_key] + + nested_conditions_satisfied = all( + ( + self._evaluate_condition( + sub_cond, trigger_method, listener_name + ) + if is_flow_condition_dict(sub_cond) + else True + ) + for sub_cond in sub_conditions + ) + + if direct_methods_satisfied and nested_conditions_satisfied: self._pending_and_listeners.pop(pending_key, None) return True @@ -1311,30 +1213,22 @@ class Flow(Generic[T], 
metaclass=FlowMeta): def _find_triggered_methods( self, trigger_method: FlowMethodName, router_only: bool ) -> list[FlowMethodName]: - """ - Finds all methods that should be triggered based on conditions. + """Finds all methods that should be triggered based on conditions. This internal method evaluates both OR and AND conditions to determine which methods should be executed next in the flow. Supports nested conditions. - Parameters - ---------- - trigger_method : str - The name of the method that just completed execution. - router_only : bool - If True, only consider router methods. - If False, only consider non-router methods. + Args: + trigger_method: The name of the method that just completed execution. + router_only: If True, only consider router methods. If False, only consider non-router methods. - Returns - ------- - list[str] + Returns: Names of methods that should be triggered. - Notes - ----- - - Handles both OR and AND conditions, including nested combinations - - Maintains state for AND conditions using _pending_and_listeners - - Separates router and normal listener evaluation + Note: + - Handles both OR and AND conditions, including nested combinations + - Maintains state for AND conditions using _pending_and_listeners + - Separates router and normal listener evaluation """ triggered: list[FlowMethodName] = [] @@ -1350,10 +1244,10 @@ class Flow(Generic[T], metaclass=FlowMeta): if is_simple_flow_condition(condition_data): condition_type, methods = condition_data - if condition_type == "OR": + if condition_type == OR_CONDITION: if trigger_method in methods: triggered.append(listener_name) - elif condition_type == "AND": + elif condition_type == AND_CONDITION: pending_key = PendingListenerKey(listener_name) if pending_key not in self._pending_and_listeners: self._pending_and_listeners[pending_key] = set(methods) @@ -1375,33 +1269,23 @@ class Flow(Generic[T], metaclass=FlowMeta): async def _execute_single_listener( self, listener_name: FlowMethodName, result: Any ) -> None: - """ - Executes a single listener method with proper event handling. + """Executes a single listener method with proper event handling. This internal method manages the execution of an individual listener, including parameter inspection, event emission, and error handling. - Parameters - ---------- - listener_name : str - The name of the listener method to execute. - result : Any - The result from the triggering method, which may be passed - to the listener if it accepts parameters. + Args: + listener_name: The name of the listener method to execute. + result: The result from the triggering method, which may be passed to the listener if it accepts parameters. - Notes - ----- - - Inspects method signature to determine if it accepts the trigger result - - Emits events for method execution start and finish - - Handles errors gracefully with detailed logging - - Recursively triggers listeners of this listener - - Supports both parameterized and parameter-less listeners - - Skips execution if method was already completed (e.g., after reload) - - Error Handling - ------------- - Catches and logs any exceptions during execution, preventing - individual listener failures from breaking the entire flow. 
+ Note: + - Inspects method signature to determine if it accepts the trigger result + - Emits events for method execution start and finish + - Handles errors gracefully with detailed logging + - Recursively triggers listeners of this listener + - Supports both parameterized and parameter-less listeners + - Skips execution if method was already completed (e.g., after reload) + - Catches and logs any exceptions during execution, preventing individual listener failures from breaking the entire flow """ if listener_name in self._completed_methods: if self._is_execution_resuming: @@ -1460,7 +1344,16 @@ class Flow(Generic[T], metaclass=FlowMeta): logger.info(message) logger.warning(message) - def plot(self, filename: str = "crewai_flow") -> None: + def plot(self, filename: str = "crewai_flow.html", show: bool = True) -> str: + """Create interactive HTML visualization of Flow structure. + + Args: + filename: Output HTML filename (default: "crewai_flow.html"). + show: Whether to open in browser (default: True). + + Returns: + Absolute path to generated HTML file. + """ crewai_event_bus.emit( self, FlowPlotEvent( @@ -1468,4 +1361,5 @@ class Flow(Generic[T], metaclass=FlowMeta): flow_name=self.name or self.__class__.__name__, ), ) - plot_flow(self, filename) + structure = build_flow_structure(self) + return render_interactive(structure, filename=filename, show=show) diff --git a/lib/crewai/src/crewai/flow/flow_visualizer.py b/lib/crewai/src/crewai/flow/flow_visualizer.py deleted file mode 100644 index d49f2cf34..000000000 --- a/lib/crewai/src/crewai/flow/flow_visualizer.py +++ /dev/null @@ -1,234 +0,0 @@ -# flow_visualizer.py -from __future__ import annotations - -import os -from typing import TYPE_CHECKING, Any - -from pyvis.network import Network # type: ignore[import-untyped] - -from crewai.flow.config import COLORS, NODE_STYLES, NodeStyles -from crewai.flow.html_template_handler import HTMLTemplateHandler -from crewai.flow.legend_generator import generate_legend_items_html, get_legend_items -from crewai.flow.path_utils import safe_path_join -from crewai.flow.utils import calculate_node_levels -from crewai.flow.visualization_utils import ( - add_edges, - add_nodes_to_network, - compute_positions, -) -from crewai.utilities.printer import Printer - - -if TYPE_CHECKING: - from crewai.flow.flow import Flow - - -_printer = Printer() - - -class FlowPlot: - """Handles the creation and rendering of flow visualization diagrams.""" - - def __init__(self, flow: Flow[Any]) -> None: - """ - Initialize FlowPlot with a flow object. - - Parameters - ---------- - flow : Flow - A Flow instance to visualize. - - Raises - ------ - ValueError - If flow object is invalid or missing required attributes. - """ - self.flow = flow - self.colors = COLORS - self.node_styles: NodeStyles = NODE_STYLES - - def plot(self, filename: str) -> None: - """ - Generate and save an HTML visualization of the flow. - - Parameters - ---------- - filename : str - Name of the output file (without extension). - - Raises - ------ - ValueError - If filename is invalid or network generation fails. - IOError - If file operations fail or visualization cannot be generated. - RuntimeError - If network visualization generation fails. 
- """ - - try: - # Initialize network - net = Network(directed=True, height="750px", bgcolor=self.colors["bg"]) - - # Set options to disable physics - net.set_options( - """ - var options = { - "nodes": { - "font": { - "multi": "html" - } - }, - "physics": { - "enabled": false - } - } - """ - ) - - # Calculate levels for nodes - try: - node_levels = calculate_node_levels(self.flow) - except Exception as e: - raise ValueError(f"Failed to calculate node levels: {e!s}") from e - - # Compute positions - try: - node_positions = compute_positions(self.flow, node_levels) - except Exception as e: - raise ValueError(f"Failed to compute node positions: {e!s}") from e - - # Add nodes to the network - try: - add_nodes_to_network(net, self.flow, node_positions, self.node_styles) - except Exception as e: - raise RuntimeError(f"Failed to add nodes to network: {e!s}") from e - - # Add edges to the network - try: - add_edges(net, self.flow, node_positions, self.colors) - except Exception as e: - raise RuntimeError(f"Failed to add edges to network: {e!s}") from e - - # Generate HTML - try: - network_html = net.generate_html() - final_html_content = self._generate_final_html(network_html) - except Exception as e: - raise RuntimeError( - f"Failed to generate network visualization: {e!s}" - ) from e - - # Save the final HTML content to the file - try: - with open(f"{filename}.html", "w", encoding="utf-8") as f: - f.write(final_html_content) - _printer.print(f"Plot saved as {filename}.html", color="green") - except IOError as e: - raise IOError( - f"Failed to save flow visualization to {filename}.html: {e!s}" - ) from e - - except (ValueError, RuntimeError, IOError) as e: - raise e - except Exception as e: - raise RuntimeError( - f"Unexpected error during flow visualization: {e!s}" - ) from e - finally: - self._cleanup_pyvis_lib(filename) - - def _generate_final_html(self, network_html: str) -> str: - """ - Generate the final HTML content with network visualization and legend. - - Parameters - ---------- - network_html : str - HTML content generated by pyvis Network. - - Returns - ------- - str - Complete HTML content with styling and legend. - - Raises - ------ - IOError - If template or logo files cannot be accessed. - ValueError - If network_html is invalid. - """ - if not network_html: - raise ValueError("Invalid network HTML content") - - try: - # Extract just the body content from the generated HTML - current_dir = os.path.dirname(__file__) - template_path = safe_path_join( - "assets", "crewai_flow_visual_template.html", root=current_dir - ) - logo_path = safe_path_join("assets", "crewai_logo.svg", root=current_dir) - - if not os.path.exists(template_path): - raise IOError(f"Template file not found: {template_path}") - if not os.path.exists(logo_path): - raise IOError(f"Logo file not found: {logo_path}") - - html_handler = HTMLTemplateHandler(template_path, logo_path) - network_body = html_handler.extract_body_content(network_html) - - # Generate the legend items HTML - legend_items = get_legend_items(self.colors) - legend_items_html = generate_legend_items_html(legend_items) - return html_handler.generate_final_html(network_body, legend_items_html) - except Exception as e: - raise IOError(f"Failed to generate visualization HTML: {e!s}") from e - - @staticmethod - def _cleanup_pyvis_lib(filename: str) -> None: - """ - Clean up the generated lib folder from pyvis. - - This method safely removes the temporary lib directory created by pyvis - during network visualization generation. 
The lib folder is created in the - same directory as the output HTML file. - - Parameters - ---------- - filename : str - The output filename (without .html extension) used for the visualization. - """ - try: - import shutil - - output_dir = os.path.dirname(os.path.abspath(filename)) or os.getcwd() - lib_folder = os.path.join(output_dir, "lib") - if os.path.exists(lib_folder) and os.path.isdir(lib_folder): - vis_js = os.path.join(lib_folder, "vis-network.min.js") - if os.path.exists(vis_js): - shutil.rmtree(lib_folder) - except Exception as e: - _printer.print(f"Error cleaning up lib folder: {e}", color="red") - - -def plot_flow(flow: Flow[Any], filename: str = "flow_plot") -> None: - """ - Convenience function to create and save a flow visualization. - - Parameters - ---------- - flow : Flow - Flow instance to visualize. - filename : str, optional - Output filename without extension, by default "flow_plot". - - Raises - ------ - ValueError - If flow object or filename is invalid. - IOError - If file operations fail. - """ - visualizer = FlowPlot(flow) - visualizer.plot(filename) diff --git a/lib/crewai/src/crewai/flow/flow_wrappers.py b/lib/crewai/src/crewai/flow/flow_wrappers.py index 0f3b1a5fe..8d81d677a 100644 --- a/lib/crewai/src/crewai/flow/flow_wrappers.py +++ b/lib/crewai/src/crewai/flow/flow_wrappers.py @@ -5,7 +5,6 @@ from __future__ import annotations from collections.abc import Callable, Sequence import functools import inspect -import types from typing import Any, Generic, Literal, ParamSpec, TypeAlias, TypeVar, TypedDict from typing_extensions import Required, Self @@ -17,8 +16,6 @@ P = ParamSpec("P") R = TypeVar("R") FlowConditionType: TypeAlias = Literal["OR", "AND"] - -# Simple flow condition stored as tuple (condition_type, method_list) SimpleFlowCondition: TypeAlias = tuple[FlowConditionType, list[FlowMethodName]] @@ -26,6 +23,11 @@ class FlowCondition(TypedDict, total=False): """Type definition for flow trigger conditions. This is a recursive structure where conditions can contain nested FlowConditions. + + Attributes: + type: The type of the condition. + conditions: A list of conditions types. + methods: A list of methods. """ type: Required[FlowConditionType] @@ -79,8 +81,7 @@ class FlowMethod(Generic[P, R]): The result of calling the wrapped method. """ if self._instance is not None: - bound = types.MethodType(self._meth, self._instance) - return bound(*args, **kwargs) + return self._meth(self._instance, *args, **kwargs) return self._meth(*args, **kwargs) def unwrap(self) -> Callable[P, R]: diff --git a/lib/crewai/src/crewai/flow/html_template_handler.py b/lib/crewai/src/crewai/flow/html_template_handler.py deleted file mode 100644 index 9218d1ae9..000000000 --- a/lib/crewai/src/crewai/flow/html_template_handler.py +++ /dev/null @@ -1,91 +0,0 @@ -"""HTML template processing and generation for flow visualization diagrams.""" - -import base64 -import re -from typing import Any - -from crewai.flow.path_utils import validate_path_exists - - -class HTMLTemplateHandler: - """Handles HTML template processing and generation for flow visualization diagrams.""" - - def __init__(self, template_path: str, logo_path: str) -> None: - """ - Initialize HTMLTemplateHandler with validated template and logo paths. - - Parameters - ---------- - template_path : str - Path to the HTML template file. - logo_path : str - Path to the logo image file. - - Raises - ------ - ValueError - If template or logo paths are invalid or files don't exist. 
- """ - try: - self.template_path = validate_path_exists(template_path, "file") - self.logo_path = validate_path_exists(logo_path, "file") - except ValueError as e: - raise ValueError(f"Invalid template or logo path: {e}") from e - - def read_template(self) -> str: - """Read and return the HTML template file contents.""" - with open(self.template_path, "r", encoding="utf-8") as f: - return f.read() - - def encode_logo(self) -> str: - """Convert the logo SVG file to base64 encoded string.""" - with open(self.logo_path, "rb") as logo_file: - logo_svg_data = logo_file.read() - return base64.b64encode(logo_svg_data).decode("utf-8") - - def extract_body_content(self, html: str) -> str: - """Extract and return content between body tags from HTML string.""" - match = re.search("(.*?)", html, re.DOTALL) - return match.group(1) if match else "" - - def generate_legend_items_html(self, legend_items: list[dict[str, Any]]) -> str: - """Generate HTML markup for the legend items.""" - legend_items_html = "" - for item in legend_items: - if "border" in item: - legend_items_html += f""" -
-                <div>{item["label"]}</div>
-            """
-            elif item.get("dashed") is not None:
-                style = "dashed" if item["dashed"] else "solid"
-                legend_items_html += f"""
-                <div>{item["label"]}</div>
-            """
-            else:
-                legend_items_html += f"""
-                <div>{item["label"]}</div>
- """ - return legend_items_html - - def generate_final_html( - self, network_body: str, legend_items_html: str, title: str = "Flow Plot" - ) -> str: - """Combine all components into final HTML document with network visualization.""" - html_template = self.read_template() - logo_svg_base64 = self.encode_logo() - - return ( - html_template.replace("{{ title }}", title) - .replace("{{ network_content }}", network_body) - .replace("{{ logo_svg_base64 }}", logo_svg_base64) - .replace("", legend_items_html) - ) diff --git a/lib/crewai/src/crewai/flow/legend_generator.py b/lib/crewai/src/crewai/flow/legend_generator.py deleted file mode 100644 index 7a1e06582..000000000 --- a/lib/crewai/src/crewai/flow/legend_generator.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Legend generation for flow visualization diagrams.""" - -from typing import Any - -from crewai.flow.config import FlowColors - - -def get_legend_items(colors: FlowColors) -> list[dict[str, Any]]: - """Generate legend items based on flow colors. - - Parameters - ---------- - colors : FlowColors - Dictionary containing color definitions for flow elements. - - Returns - ------- - list[dict[str, Any]] - List of legend item dictionaries with labels and styling. - """ - return [ - {"label": "Start Method", "color": colors["start"]}, - {"label": "Method", "color": colors["method"]}, - { - "label": "Crew Method", - "color": colors["bg"], - "border": colors["start"], - "dashed": False, - }, - { - "label": "Router", - "color": colors["router"], - "border": colors["router_border"], - "dashed": True, - }, - {"label": "Trigger", "color": colors["edge"], "dashed": False}, - {"label": "AND Trigger", "color": colors["edge"], "dashed": True}, - { - "label": "Router Trigger", - "color": colors["router_edge"], - "dashed": True, - }, - ] - - -def generate_legend_items_html(legend_items: list[dict[str, Any]]) -> str: - """Generate HTML markup for legend items. - - Parameters - ---------- - legend_items : list[dict[str, Any]] - List of legend item dictionaries containing labels and styling. - - Returns - ------- - str - HTML string containing formatted legend items. - """ - legend_items_html = "" - for item in legend_items: - if "border" in item: - style = "dashed" if item["dashed"] else "solid" - legend_items_html += f""" -
-            <div>{item["label"]}</div>
-        """
-        elif item.get("dashed") is not None:
-            style = "dashed" if item["dashed"] else "solid"
-            legend_items_html += f"""
-            <div>{item["label"]}</div>
-        """
-        else:
-            legend_items_html += f"""
-            <div>{item["label"]}</div>
- """ - return legend_items_html diff --git a/lib/crewai/src/crewai/flow/path_utils.py b/lib/crewai/src/crewai/flow/path_utils.py deleted file mode 100644 index 02a893865..000000000 --- a/lib/crewai/src/crewai/flow/path_utils.py +++ /dev/null @@ -1,133 +0,0 @@ -""" -Path utilities for secure file operations in CrewAI flow module. - -This module provides utilities for secure path handling to prevent directory -traversal attacks and ensure paths remain within allowed boundaries. -""" - -from pathlib import Path - - -def safe_path_join(*parts: str, root: str | Path | None = None) -> str: - """ - Safely join path components and ensure the result is within allowed boundaries. - - Parameters - ---------- - *parts : str - Variable number of path components to join. - root : Union[str, Path, None], optional - Root directory to use as base. If None, uses current working directory. - - Returns - ------- - str - String representation of the resolved path. - - Raises - ------ - ValueError - If the resulting path would be outside the root directory - or if any path component is invalid. - """ - if not parts: - raise ValueError("No path components provided") - - try: - # Convert all parts to strings and clean them - clean_parts = [str(part).strip() for part in parts if part] - if not clean_parts: - raise ValueError("No valid path components provided") - - # Establish root directory - root_path = Path(root).resolve() if root else Path.cwd() - - # Join and resolve the full path - full_path = Path(root_path, *clean_parts).resolve() - - # Check if the resolved path is within root - if not str(full_path).startswith(str(root_path)): - raise ValueError( - f"Invalid path: Potential directory traversal. Path must be within {root_path}" - ) - - return str(full_path) - - except Exception as e: - if isinstance(e, ValueError): - raise - raise ValueError(f"Invalid path components: {e!s}") from e - - -def validate_path_exists(path: str | Path, file_type: str = "file") -> str: - """ - Validate that a path exists and is of the expected type. - - Parameters - ---------- - path : Union[str, Path] - Path to validate. - file_type : str, optional - Expected type ('file' or 'directory'), by default 'file'. - - Returns - ------- - str - Validated path as string. - - Raises - ------ - ValueError - If path doesn't exist or is not of expected type. - """ - try: - path_obj = Path(path).resolve() - - if not path_obj.exists(): - raise ValueError(f"Path does not exist: {path}") - - if file_type == "file" and not path_obj.is_file(): - raise ValueError(f"Path is not a file: {path}") - if file_type == "directory" and not path_obj.is_dir(): - raise ValueError(f"Path is not a directory: {path}") - - return str(path_obj) - - except Exception as e: - if isinstance(e, ValueError): - raise - raise ValueError(f"Invalid path: {e!s}") from e - - -def list_files(directory: str | Path, pattern: str = "*") -> list[str]: - """ - Safely list files in a directory matching a pattern. - - Parameters - ---------- - directory : Union[str, Path] - Directory to search in. - pattern : str, optional - Glob pattern to match files against, by default "*". - - Returns - ------- - List[str] - List of matching file paths. - - Raises - ------ - ValueError - If directory is invalid or inaccessible. 
- """ - try: - dir_path = Path(directory).resolve() - if not dir_path.is_dir(): - raise ValueError(f"Not a directory: {directory}") - - return [str(p) for p in dir_path.glob(pattern) if p.is_file()] - - except Exception as e: - if isinstance(e, ValueError): - raise - raise ValueError(f"Error listing files: {e!s}") from e diff --git a/lib/crewai/src/crewai/flow/utils.py b/lib/crewai/src/crewai/flow/utils.py index 753eb280a..bad9d9670 100644 --- a/lib/crewai/src/crewai/flow/utils.py +++ b/lib/crewai/src/crewai/flow/utils.py @@ -13,14 +13,17 @@ Example >>> ancestors = build_ancestor_dict(flow) """ +from __future__ import annotations + import ast from collections import defaultdict, deque import inspect import textwrap -from typing import Any +from typing import Any, TYPE_CHECKING from typing_extensions import TypeIs +from crewai.flow.constants import OR_CONDITION, AND_CONDITION from crewai.flow.flow_wrappers import ( FlowCondition, FlowConditions, @@ -30,6 +33,8 @@ from crewai.flow.flow_wrappers import ( from crewai.flow.types import FlowMethodCallable, FlowMethodName from crewai.utilities.printer import Printer +if TYPE_CHECKING: + from crewai.flow.flow import Flow _printer = Printer() @@ -74,11 +79,22 @@ def get_possible_return_constants(function: Any) -> list[str] | None: _printer.print(f"Source code:\n{source}", color="yellow") return None - return_values = set() - dict_definitions = {} + return_values: set[str] = set() + dict_definitions: dict[str, list[str]] = {} + variable_values: dict[str, list[str]] = {} - class DictionaryAssignmentVisitor(ast.NodeVisitor): - def visit_Assign(self, node): + def extract_string_constants(node: ast.expr) -> list[str]: + """Recursively extract all string constants from an AST node.""" + strings: list[str] = [] + if isinstance(node, ast.Constant) and isinstance(node.value, str): + strings.append(node.value) + elif isinstance(node, ast.IfExp): + strings.extend(extract_string_constants(node.body)) + strings.extend(extract_string_constants(node.orelse)) + return strings + + class VariableAssignmentVisitor(ast.NodeVisitor): + def visit_Assign(self, node: ast.Assign) -> None: # Check if this assignment is assigning a dictionary literal to a variable if isinstance(node.value, ast.Dict) and len(node.targets) == 1: target = node.targets[0] @@ -92,29 +108,53 @@ def get_possible_return_constants(function: Any) -> list[str] | None: ] if dict_values: dict_definitions[var_name] = dict_values + + if len(node.targets) == 1: + target = node.targets[0] + var_name_alt: str | None = None + if isinstance(target, ast.Name): + var_name_alt = target.id + elif isinstance(target, ast.Attribute): + var_name_alt = f"{target.value.id if isinstance(target.value, ast.Name) else '_'}.{target.attr}" + + if var_name_alt: + strings = extract_string_constants(node.value) + if strings: + variable_values[var_name_alt] = strings + self.generic_visit(node) class ReturnVisitor(ast.NodeVisitor): - def visit_Return(self, node): - # Direct string return - if isinstance(node.value, ast.Constant) and isinstance( - node.value.value, str + def visit_Return(self, node: ast.Return) -> None: + if ( + node.value + and isinstance(node.value, ast.Constant) + and isinstance(node.value.value, str) ): return_values.add(node.value.value) - # Dictionary-based return, like return paths[result] - elif isinstance(node.value, ast.Subscript): - # Check if we're subscripting a known dictionary variable + elif node.value and isinstance(node.value, ast.Subscript): if isinstance(node.value.value, ast.Name): - var_name = 
node.value.value.id - if var_name in dict_definitions: - # Add all possible dictionary values - for v in dict_definitions[var_name]: + var_name_dict = node.value.value.id + if var_name_dict in dict_definitions: + for v in dict_definitions[var_name_dict]: return_values.add(v) + elif node.value: + var_name_ret: str | None = None + if isinstance(node.value, ast.Name): + var_name_ret = node.value.id + elif isinstance(node.value, ast.Attribute): + var_name_ret = f"{node.value.value.id if isinstance(node.value.value, ast.Name) else '_'}.{node.value.attr}" + + if var_name_ret and var_name_ret in variable_values: + for v in variable_values[var_name_ret]: + return_values.add(v) + self.generic_visit(node) - # First pass: identify dictionary assignments - DictionaryAssignmentVisitor().visit(code_ast) - # Second pass: identify returns + def visit_If(self, node: ast.If) -> None: + self.generic_visit(node) + + VariableAssignmentVisitor().visit(code_ast) ReturnVisitor().visit(code_ast) return list(return_values) if return_values else None @@ -158,7 +198,15 @@ def calculate_node_levels(flow: Any) -> dict[str, int]: # Precompute listener dependencies or_listeners = defaultdict(list) and_listeners = defaultdict(set) - for listener_name, (condition_type, trigger_methods) in flow._listeners.items(): + for listener_name, condition_data in flow._listeners.items(): + if isinstance(condition_data, tuple): + condition_type, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive(condition_data, flow) + condition_type = condition_data.get("type", "OR") + else: + continue + if condition_type == "OR": for method in trigger_methods: or_listeners[method].append(listener_name) @@ -192,9 +240,13 @@ def calculate_node_levels(flow: Any) -> dict[str, int]: if listener_name not in visited: queue.append(listener_name) - # Handle router connections process_router_paths(flow, current, current_level, levels, queue) + max_level = max(levels.values()) if levels else 0 + for method_name in flow._methods: + if method_name not in levels: + levels[method_name] = max_level + 1 + return levels @@ -215,8 +267,14 @@ def count_outgoing_edges(flow: Any) -> dict[str, int]: counts = {} for method_name in flow._methods: counts[method_name] = 0 - for method_name in flow._listeners: - _, trigger_methods = flow._listeners[method_name] + for condition_data in flow._listeners.values(): + if isinstance(condition_data, tuple): + _, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive(condition_data, flow) + else: + continue + for trigger in trigger_methods: if trigger in flow._methods: counts[trigger] += 1 @@ -271,21 +329,34 @@ def dfs_ancestors( return visited.add(node) - # Handle regular listeners - for listener_name, (_, trigger_methods) in flow._listeners.items(): + for listener_name, condition_data in flow._listeners.items(): + if isinstance(condition_data, tuple): + _, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive(condition_data, flow) + else: + continue + if node in trigger_methods: ancestors[listener_name].add(node) ancestors[listener_name].update(ancestors[node]) dfs_ancestors(listener_name, ancestors, visited, flow) - # Handle router methods separately if node in flow._routers: router_method_name = node paths = flow._router_paths.get(router_method_name, []) for path in paths: - for listener_name, (_, trigger_methods) in 
flow._listeners.items(): + for listener_name, condition_data in flow._listeners.items(): + if isinstance(condition_data, tuple): + _, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive( + condition_data, flow + ) + else: + continue + if path in trigger_methods: - # Only propagate the ancestors of the router method, not the router method itself ancestors[listener_name].update(ancestors[node]) dfs_ancestors(listener_name, ancestors, visited, flow) @@ -335,19 +406,32 @@ def build_parent_children_dict(flow: Any) -> dict[str, list[str]]: """ parent_children: dict[str, list[str]] = {} - # Map listeners to their trigger methods - for listener_name, (_, trigger_methods) in flow._listeners.items(): + for listener_name, condition_data in flow._listeners.items(): + if isinstance(condition_data, tuple): + _, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive(condition_data, flow) + else: + continue + for trigger in trigger_methods: if trigger not in parent_children: parent_children[trigger] = [] if listener_name not in parent_children[trigger]: parent_children[trigger].append(listener_name) - # Map router methods to their paths and to listeners for router_method_name, paths in flow._router_paths.items(): for path in paths: - # Map router method to listeners of each path - for listener_name, (_, trigger_methods) in flow._listeners.items(): + for listener_name, condition_data in flow._listeners.items(): + if isinstance(condition_data, tuple): + _, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive( + condition_data, flow + ) + else: + continue + if path in trigger_methods: if router_method_name not in parent_children: parent_children[router_method_name] = [] @@ -382,17 +466,27 @@ def get_child_index( return children.index(child) -def process_router_paths(flow, current, current_level, levels, queue): - """ - Handle the router connections for the current node. - """ +def process_router_paths( + flow: Any, + current: str, + current_level: int, + levels: dict[str, int], + queue: deque[str], +) -> None: + """Handle the router connections for the current node.""" if current in flow._routers: paths = flow._router_paths.get(current, []) for path in paths: - for listener_name, ( - _condition_type, - trigger_methods, - ) in flow._listeners.items(): + for listener_name, condition_data in flow._listeners.items(): + if isinstance(condition_data, tuple): + _condition_type, trigger_methods = condition_data + elif isinstance(condition_data, dict): + trigger_methods = _extract_all_methods_recursive( + condition_data, flow + ) + else: + continue + if path in trigger_methods: if ( listener_name not in levels @@ -413,7 +507,7 @@ def is_flow_method_name(obj: Any) -> TypeIs[FlowMethodName]: return isinstance(obj, str) -def is_flow_method_callable(obj: Any) -> TypeIs[FlowMethodCallable]: +def is_flow_method_callable(obj: Any) -> TypeIs[FlowMethodCallable[..., Any]]: """Check if the object is a callable flow method. Args: @@ -517,3 +611,107 @@ def is_flow_condition_dict(obj: Any) -> TypeIs[FlowCondition]: return False return True + + +def _extract_all_methods_recursive( + condition: str | FlowCondition | dict[str, Any] | list[Any], + flow: Flow[Any] | None = None, +) -> list[FlowMethodName]: + """Extract ALL method names from a condition tree recursively. 
+ + This function recursively extracts every method name from the entire + condition tree, regardless of nesting. Used for visualization and debugging. + + Note: Only extracts actual method names, not router output strings. + If flow is provided, it will filter out strings that are not in flow._methods. + + Args: + condition: Can be a string, dict, or list + flow: Optional flow instance to filter out non-method strings + + Returns: + List of all method names found in the condition tree + """ + if is_flow_method_name(condition): + if flow is not None: + if condition in flow._methods: + return [condition] + return [] + return [condition] + if is_flow_condition_dict(condition): + normalized = _normalize_condition(condition) + methods = [] + for sub_cond in normalized.get("conditions", []): + methods.extend(_extract_all_methods_recursive(sub_cond, flow)) + return methods + if isinstance(condition, list): + methods = [] + for item in condition: + methods.extend(_extract_all_methods_recursive(item, flow)) + return methods + return [] + + +def _normalize_condition( + condition: FlowConditions | FlowCondition | FlowMethodName, +) -> FlowCondition: + """Normalize a condition to standard format with 'conditions' key. + + Args: + condition: Can be a string (method name), dict (condition), or list + + Returns: + Normalized dict with 'type' and 'conditions' keys + """ + if is_flow_method_name(condition): + return {"type": OR_CONDITION, "conditions": [condition]} + if is_flow_condition_dict(condition): + if "conditions" in condition: + return condition + if "methods" in condition: + return {"type": condition["type"], "conditions": condition["methods"]} + return condition + if is_flow_condition_list(condition): + return {"type": OR_CONDITION, "conditions": condition} + + raise ValueError(f"Cannot normalize condition: {condition}") + + +def _extract_all_methods( + condition: str | FlowCondition | dict[str, Any] | list[Any], +) -> list[FlowMethodName]: + """Extract all method names from a condition (including nested). + + For AND conditions, this extracts methods that must ALL complete. + For OR conditions nested inside AND, we don't extract their methods + since only one branch of the OR needs to trigger, not all methods. + + This function is used for runtime execution logic, where we need to know + which methods must complete for AND conditions. For visualization purposes, + use _extract_all_methods_recursive() instead. 
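# Illustrative sketch (editor's example, not part of the patch): how the two
# extraction helpers defined in this hunk differ on a nested condition.
# flow._listeners values may now hold either the legacy
# (condition_type, trigger_methods) tuple or a condition dict like the one
# below; the dict form is what these helpers normalize. Assumes
# OR_CONDITION == "OR" and AND_CONDITION == "AND", matching the literal
# string comparisons used elsewhere in this module.
from crewai.flow.utils import (
    _extract_all_methods,
    _extract_all_methods_recursive,
)

condition = {
    "type": "AND",
    "conditions": [
        "fetch_data",
        {"type": "OR", "conditions": ["path_a", "path_b"]},
    ],
}

# Visualization/debugging: every method name in the tree is collected.
_extract_all_methods_recursive(condition)  # -> ["fetch_data", "path_a", "path_b"]

# Runtime AND-gating: only the top-level members that must all complete; the
# nested OR branch is skipped because any one of its methods can satisfy it.
_extract_all_methods(condition)  # -> ["fetch_data"]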
+ + Args: + condition: Can be a string, dict, or list + + Returns: + List of all method names in the condition tree that must complete + """ + if is_flow_method_name(condition): + return [condition] + if is_flow_condition_dict(condition): + normalized = _normalize_condition(condition) + cond_type = normalized.get("type", OR_CONDITION) + + if cond_type == AND_CONDITION: + return [ + sub_cond + for sub_cond in normalized.get("conditions", []) + if is_flow_method_name(sub_cond) + ] + return [] + if isinstance(condition, list): + methods = [] + for item in condition: + methods.extend(_extract_all_methods(item)) + return methods + return [] diff --git a/lib/crewai/src/crewai/flow/visualization/__init__.py b/lib/crewai/src/crewai/flow/visualization/__init__.py new file mode 100644 index 000000000..98665f642 --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/__init__.py @@ -0,0 +1,25 @@ +"""Flow structure visualization utilities.""" + +from crewai.flow.visualization.builder import ( + build_flow_structure, + calculate_execution_paths, + print_structure_summary, + structure_to_dict, +) +from crewai.flow.visualization.renderers import render_interactive +from crewai.flow.visualization.types import FlowStructure, NodeMetadata, StructureEdge + + +visualize_flow_structure = render_interactive + +__all__ = [ + "FlowStructure", + "NodeMetadata", + "StructureEdge", + "build_flow_structure", + "calculate_execution_paths", + "print_structure_summary", + "render_interactive", + "structure_to_dict", + "visualize_flow_structure", +] diff --git a/lib/crewai/src/crewai/flow/visualization/assets/interactive.js b/lib/crewai/src/crewai/flow/visualization/assets/interactive.js new file mode 100644 index 000000000..c6998becd --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/assets/interactive.js @@ -0,0 +1,1681 @@ +function loadVisCDN() { + return new Promise((resolve, reject) => { + const script = document.createElement('script'); + script.src = 'https://unpkg.com/vis-network@9.1.2/dist/vis-network.min.js'; + script.onload = resolve; + script.onerror = reject; + document.head.appendChild(script); + }); +} + +(async () => { + + try { + await loadVisCDN(); + const nodes = new vis.DataSet('{{ nodes_list_json }}'); + const edges = new vis.DataSet('{{ edges_list_json }}'); + + const container = document.getElementById('network'); + const data = { + nodes: nodes, + edges: edges + }; + + const options = { + nodes: { + shape: 'custom', + shadow: false, + chosen: false, + size: 30, + ctxRenderer: function ({ctx, id, x, y, state: {selected, hover}, style, label}) { + const node = nodes.get(id); + if (!node || !node.nodeStyle) return {}; + + const nodeStyle = node.nodeStyle; + const baseWidth = 200; + const baseHeight = 60; + + let scale = 1.0; + if (pressedNodeId === id) { + scale = 0.98; + } else if (hoveredNodeId === id) { + scale = 1.04; + } + + const isActiveDrawer = activeDrawerNodeId === id; + + const width = baseWidth * scale; + const height = baseHeight * scale; + + return { + drawNode() { + ctx.save(); + + const nodeOpacity = node.opacity !== undefined ? 
node.opacity : 1.0; + ctx.globalAlpha = nodeOpacity; + + if (node.shadow && node.shadow.enabled) { + ctx.shadowColor = node.shadow.color || '{{ CREWAI_ORANGE }}'; + ctx.shadowBlur = node.shadow.size || 20; + ctx.shadowOffsetX = node.shadow.x || 0; + ctx.shadowOffsetY = node.shadow.y || 0; + } else if (isActiveDrawer) { + ctx.shadowColor = '{{ CREWAI_ORANGE }}'; + ctx.shadowBlur = 20; + ctx.shadowOffsetX = 0; + ctx.shadowOffsetY = 0; + } else { + ctx.shadowColor = 'rgba(0,0,0,0.1)'; + ctx.shadowBlur = 8; + ctx.shadowOffsetX = 2; + ctx.shadowOffsetY = 2; + } + + const radius = 20 * scale; + const rectX = x - width / 2; + const rectY = y - height / 2; + + ctx.beginPath(); + ctx.moveTo(rectX + radius, rectY); + ctx.lineTo(rectX + width - radius, rectY); + ctx.quadraticCurveTo(rectX + width, rectY, rectX + width, rectY + radius); + ctx.lineTo(rectX + width, rectY + height - radius); + ctx.quadraticCurveTo(rectX + width, rectY + height, rectX + width - radius, rectY + height); + ctx.lineTo(rectX + radius, rectY + height); + ctx.quadraticCurveTo(rectX, rectY + height, rectX, rectY + height - radius); + ctx.lineTo(rectX, rectY + radius); + ctx.quadraticCurveTo(rectX, rectY, rectX + radius, rectY); + ctx.closePath(); + + ctx.fillStyle = nodeStyle.bgColor; + ctx.fill(); + + ctx.shadowColor = 'transparent'; + ctx.shadowBlur = 0; + + const borderWidth = isActiveDrawer ? nodeStyle.borderWidth * 2 : nodeStyle.borderWidth; + ctx.strokeStyle = isActiveDrawer ? '{{ CREWAI_ORANGE }}' : nodeStyle.borderColor; + ctx.lineWidth = borderWidth * scale; + ctx.stroke(); + + ctx.font = `500 ${13 * scale}px 'JetBrains Mono', 'SF Mono', 'Monaco', 'Menlo', 'Consolas', monospace`; + ctx.textAlign = 'center'; + ctx.textBaseline = 'middle'; + + const textMetrics = ctx.measureText(nodeStyle.name); + const textWidth = textMetrics.width; + const textHeight = 13 * scale; + const textPadding = 8 * scale; + const textBgRadius = 6 * scale; + + const textBgX = x - textWidth / 2 - textPadding; + const textBgY = y - textHeight / 2 - textPadding / 2; + const textBgWidth = textWidth + textPadding * 2; + const textBgHeight = textHeight + textPadding; + + ctx.beginPath(); + ctx.moveTo(textBgX + textBgRadius, textBgY); + ctx.lineTo(textBgX + textBgWidth - textBgRadius, textBgY); + ctx.quadraticCurveTo(textBgX + textBgWidth, textBgY, textBgX + textBgWidth, textBgY + textBgRadius); + ctx.lineTo(textBgX + textBgWidth, textBgY + textBgHeight - textBgRadius); + ctx.quadraticCurveTo(textBgX + textBgWidth, textBgY + textBgHeight, textBgX + textBgWidth - textBgRadius, textBgY + textBgHeight); + ctx.lineTo(textBgX + textBgRadius, textBgY + textBgHeight); + ctx.quadraticCurveTo(textBgX, textBgY + textBgHeight, textBgX, textBgY + textBgHeight - textBgRadius); + ctx.lineTo(textBgX, textBgY + textBgRadius); + ctx.quadraticCurveTo(textBgX, textBgY, textBgX + textBgRadius, textBgY); + ctx.closePath(); + + ctx.fillStyle = 'rgba(255, 255, 255, 0.2)'; + ctx.fill(); + + ctx.fillStyle = nodeStyle.fontColor; + ctx.fillText(nodeStyle.name, x, y); + + ctx.restore(); + }, + nodeDimensions: {width, height} + }; + }, + scaling: { + min: 1, + max: 100 + } + }, + edges: { + width: 2, + hoverWidth: 0, + labelHighlightBold: false, + shadow: { + enabled: true, + color: 'rgba(0,0,0,0.08)', + size: 4, + x: 1, + y: 1 + }, + smooth: { + type: 'cubicBezier', + roundness: 0.5 + }, + font: { + size: 13, + align: 'middle', + color: 'transparent', + face: 'Inter, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif', + strokeWidth: 0, + background: 'transparent', 
+ vadjust: 0, + bold: { + face: 'Inter, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif', + mod: 'bold', + vadjust: 0 + } + }, + arrows: { + to: { + enabled: true, + scaleFactor: 0.8, + type: 'triangle' + } + }, + arrowStrikethrough: true, + chosen: { + edge: false, + label: false + } + }, + physics: { + enabled: true, + hierarchicalRepulsion: { + nodeDistance: 180, + centralGravity: 0.0, + springLength: 150, + springConstant: 0.01, + damping: 0.09 + }, + solver: 'hierarchicalRepulsion', + stabilization: { + enabled: true, + iterations: 200, + updateInterval: 25 + } + }, + layout: { + hierarchical: { + enabled: true, + direction: 'UD', + sortMethod: 'directed', + levelSeparation: 180, + nodeSpacing: 220, + treeSpacing: 250 + } + }, + interaction: { + hover: true, + hoverConnectedEdges: false, + navigationButtons: false, + keyboard: true, + selectConnectedEdges: false, + multiselect: false + } + }; + + const network = new vis.Network(container, data, options); + + const ideSelector = document.getElementById('ide-selector'); + const savedIDE = localStorage.getItem('preferred_ide') || 'auto'; + ideSelector.value = savedIDE; + ideSelector.addEventListener('change', function () { + localStorage.setItem('preferred_ide', this.value); + }); + + const highlightCanvas = document.getElementById('highlight-canvas'); + const highlightCtx = highlightCanvas.getContext('2d'); + + function resizeHighlightCanvas() { + highlightCanvas.width = window.innerWidth; + highlightCanvas.height = window.innerHeight; + } + + resizeHighlightCanvas(); + window.addEventListener('resize', resizeHighlightCanvas); + + let highlightedNodes = []; + let highlightedEdges = []; + let nodeRestoreAnimationId = null; + let edgeRestoreAnimationId = null; + + function drawHighlightLayer() { + highlightCtx.clearRect(0, 0, highlightCanvas.width, highlightCanvas.height); + + if (highlightedNodes.length === 0) return; + + highlightedNodes.forEach(function (nodeId) { + const nodePosition = network.getPositions([nodeId])[nodeId]; + if (!nodePosition) return; + + const canvasPos = network.canvasToDOM(nodePosition); + const node = nodes.get(nodeId); + if (!node || !node.nodeStyle) return; + + const nodeStyle = node.nodeStyle; + const baseWidth = 200; + const baseHeight = 60; + const scale = 1.0; + const width = baseWidth * scale; + const height = baseHeight * scale; + + highlightCtx.save(); + + highlightCtx.shadowColor = '{{ CREWAI_ORANGE }}'; + highlightCtx.shadowBlur = 20; + highlightCtx.shadowOffsetX = 0; + highlightCtx.shadowOffsetY = 0; + + const radius = 20 * scale; + const rectX = canvasPos.x - width / 2; + const rectY = canvasPos.y - height / 2; + + highlightCtx.beginPath(); + highlightCtx.moveTo(rectX + radius, rectY); + highlightCtx.lineTo(rectX + width - radius, rectY); + highlightCtx.quadraticCurveTo(rectX + width, rectY, rectX + width, rectY + radius); + highlightCtx.lineTo(rectX + width, rectY + height - radius); + highlightCtx.quadraticCurveTo(rectX + width, rectY + height, rectX + width - radius, rectY + height); + highlightCtx.lineTo(rectX + radius, rectY + height); + highlightCtx.quadraticCurveTo(rectX, rectY + height, rectX, rectY + height - radius); + highlightCtx.lineTo(rectX, rectY + radius); + highlightCtx.quadraticCurveTo(rectX, rectY, rectX + radius, rectY); + highlightCtx.closePath(); + + highlightCtx.fillStyle = nodeStyle.bgColor; + highlightCtx.fill(); + + highlightCtx.shadowColor = 'transparent'; + highlightCtx.shadowBlur = 0; + + highlightCtx.strokeStyle = '{{ CREWAI_ORANGE }}'; + 
highlightCtx.lineWidth = nodeStyle.borderWidth * 2 * scale; + highlightCtx.stroke(); + + highlightCtx.fillStyle = nodeStyle.fontColor; + highlightCtx.font = `500 ${15 * scale}px Inter, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif`; + highlightCtx.textAlign = 'center'; + highlightCtx.textBaseline = 'middle'; + highlightCtx.fillText(nodeStyle.name, canvasPos.x, canvasPos.y); + + highlightCtx.restore(); + }); + } + + function highlightTriggeredBy(triggerNodeId) { + clearTriggeredByHighlight(); + + if (activeDrawerEdges && activeDrawerEdges.length > 0) { + activeDrawerEdges.forEach(function (edgeId) { + edges.update({ + id: edgeId, + width: 2, + opacity: 1.0 + }); + }); + activeDrawerEdges = []; + } + + if (!activeDrawerNodeId || !triggerNodeId) return; + + const allEdges = edges.get(); + let connectingEdges = []; + let actualTriggerNodeId = triggerNodeId; + + connectingEdges = allEdges.filter(edge => + edge.from === triggerNodeId && edge.to === activeDrawerNodeId + ); + + if (connectingEdges.length === 0) { + const incomingRouterEdges = allEdges.filter(edge => + edge.to === activeDrawerNodeId && edge.dashes + ); + + if (incomingRouterEdges.length > 0) { + incomingRouterEdges.forEach(function (edge) { + connectingEdges.push(edge); + actualTriggerNodeId = edge.from; + }); + } else { + const outgoingRouterEdges = allEdges.filter(edge => + edge.from === activeDrawerNodeId && edge.dashes + ); + + const nodeData = '{{ nodeData }}'; + for (const [nodeName, nodeInfo] of Object.entries(nodeData)) { + if (nodeInfo.trigger_methods && nodeInfo.trigger_methods.includes(triggerNodeId)) { + const edgeToTarget = outgoingRouterEdges.find(e => e.to === nodeName); + if (edgeToTarget) { + connectingEdges.push(edgeToTarget); + actualTriggerNodeId = nodeName; + break; + } + } + } + } + } + + if (connectingEdges.length === 0) return; + + highlightedNodes = [actualTriggerNodeId, activeDrawerNodeId]; + highlightedEdges = connectingEdges.map(e => e.id); + + const allNodesList = nodes.get(); + const nodeAnimDuration = 300; + const nodeAnimStart = performance.now(); + + function animateNodeOpacity() { + const elapsed = performance.now() - nodeAnimStart; + const progress = Math.min(elapsed / nodeAnimDuration, 1); + const eased = 1 - Math.pow(1 - progress, 3); + + allNodesList.forEach(function (node) { + const currentOpacity = node.opacity !== undefined ? node.opacity : 1.0; + const targetOpacity = highlightedNodes.includes(node.id) ? 
1.0 : 0.2; + const newOpacity = currentOpacity + (targetOpacity - currentOpacity) * eased; + + nodes.update({ + id: node.id, + opacity: newOpacity + }); + }); + + if (progress < 1) { + requestAnimationFrame(animateNodeOpacity); + } + } + + animateNodeOpacity(); + + const allEdgesList = edges.get(); + const edgeAnimDuration = 300; + const edgeAnimStart = performance.now(); + + function animateEdgeStyles() { + const elapsed = performance.now() - edgeAnimStart; + const progress = Math.min(elapsed / edgeAnimDuration, 1); + const eased = 1 - Math.pow(1 - progress, 3); + + allEdgesList.forEach(function (edge) { + if (highlightedEdges.includes(edge.id)) { + const currentWidth = edge.width || 2; + const targetWidth = 8; + const newWidth = currentWidth + (targetWidth - currentWidth) * eased; + + const currentShadowSize = edge.shadow?.size || 4; + const targetShadowSize = 20; + const newShadowSize = currentShadowSize + (targetShadowSize - currentShadowSize) * eased; + + const updateData = { + id: edge.id, + hidden: false, + opacity: 1.0, + width: newWidth, + color: { + color: '{{ CREWAI_ORANGE }}', + highlight: '{{ CREWAI_ORANGE }}' + }, + shadow: { + enabled: true, + color: '{{ CREWAI_ORANGE }}', + size: newShadowSize, + x: 0, + y: 0 + } + }; + + if (edge.dashes) { + const scale = Math.sqrt(newWidth / 2); + updateData.dashes = [15 * scale, 10 * scale]; + } + + updateData.arrows = { + to: { + enabled: true, + scaleFactor: 0.8, + type: 'triangle' + } + }; + + updateData.color = { + color: '{{ CREWAI_ORANGE }}', + highlight: '{{ CREWAI_ORANGE }}', + hover: '{{ CREWAI_ORANGE }}', + inherit: 'to' + }; + + edges.update(updateData); + } else { + edges.update({ + id: edge.id, + hidden: false, + opacity: 1.0, + width: 1, + color: { + color: 'transparent', + highlight: 'transparent' + }, + shadow: { + enabled: false + }, + font: { + color: 'transparent', + background: 'transparent' + } + }); + } + }); + + if (progress < 1) { + requestAnimationFrame(animateEdgeStyles); + } + } + + animateEdgeStyles(); + + highlightCanvas.classList.add('visible'); + + setTimeout(function () { + drawHighlightLayer(); + }, 50); + } + + function clearTriggeredByHighlight() { + const allNodesList = nodes.get(); + const nodeRestoreAnimStart = performance.now(); + const nodeRestoreAnimDuration = 300; + + function animateNodeRestore() { + if (isAnimating) { + nodeRestoreAnimationId = null; + return; + } + + const elapsed = performance.now() - nodeRestoreAnimStart; + const progress = Math.min(elapsed / nodeRestoreAnimDuration, 1); + const eased = 1 - Math.pow(1 - progress, 3); + + allNodesList.forEach(function (node) { + const currentOpacity = node.opacity !== undefined ? 
node.opacity : 1.0; + const targetOpacity = 1.0; + const newOpacity = currentOpacity + (targetOpacity - currentOpacity) * eased; + nodes.update({id: node.id, opacity: newOpacity}); + }); + + if (progress < 1) { + nodeRestoreAnimationId = requestAnimationFrame(animateNodeRestore); + } else { + nodeRestoreAnimationId = null; + } + } + + if (nodeRestoreAnimationId) { + cancelAnimationFrame(nodeRestoreAnimationId); + } + nodeRestoreAnimationId = requestAnimationFrame(animateNodeRestore); + + const allEdgesList = edges.get(); + const edgeRestoreAnimStart = performance.now(); + const edgeRestoreAnimDuration = 300; + + function animateEdgeRestore() { + if (isAnimating) { + edgeRestoreAnimationId = null; + return; + } + + const elapsed = performance.now() - edgeRestoreAnimStart; + const progress = Math.min(elapsed / edgeRestoreAnimDuration, 1); + const eased = 1 - Math.pow(1 - progress, 3); + + allEdgesList.forEach(function (edge) { + if (activeDrawerEdges.includes(edge.id)) { + return; + } + + const defaultColor = edge.dashes || edge.label === 'AND' ? '{{ CREWAI_ORANGE }}' : '{{ GRAY }}'; + const currentOpacity = edge.opacity !== undefined ? edge.opacity : 1.0; + const currentWidth = edge.width !== undefined ? edge.width : 2; + const currentShadowSize = edge.shadow && edge.shadow.size !== undefined ? edge.shadow.size : 4; + + const targetOpacity = 1.0; + const targetWidth = 2; + const targetShadowSize = 4; + + const newOpacity = currentOpacity + (targetOpacity - currentOpacity) * eased; + const newWidth = currentWidth + (targetWidth - currentWidth) * eased; + const newShadowSize = currentShadowSize + (targetShadowSize - currentShadowSize) * eased; + + const updateData = { + id: edge.id, + hidden: false, + opacity: newOpacity, + width: newWidth, + color: { + color: defaultColor, + highlight: defaultColor + }, + shadow: { + enabled: true, + color: 'rgba(0,0,0,0.08)', + size: newShadowSize, + x: 1, + y: 1 + }, + font: { + color: 'transparent', + background: 'transparent' + } + }; + + if (edge.dashes) { + const scale = Math.sqrt(newWidth / 2); + updateData.dashes = [15 * scale, 10 * scale]; + } + + edges.update(updateData); + }); + + if (progress < 1) { + edgeRestoreAnimationId = requestAnimationFrame(animateEdgeRestore); + } else { + edgeRestoreAnimationId = null; + } + } + + if (edgeRestoreAnimationId) { + cancelAnimationFrame(edgeRestoreAnimationId); + } + edgeRestoreAnimationId = requestAnimationFrame(animateEdgeRestore); + + highlightedNodes = []; + highlightedEdges = []; + + highlightCanvas.style.transition = 'opacity 300ms ease-out'; + highlightCanvas.style.opacity = '0'; + setTimeout(function () { + highlightCanvas.classList.remove('visible'); + highlightCanvas.style.opacity = '1'; + highlightCanvas.style.transition = ''; + highlightCtx.clearRect(0, 0, highlightCanvas.width, highlightCanvas.height); + }, 300); + } + + network.on('afterDrawing', function () { + if (highlightCanvas.classList.contains('visible')) { + drawHighlightLayer(); + } + }); + + let hoveredNodeId = null; + let pressedNodeId = null; + let isClicking = false; + let activeDrawerNodeId = null; + let activeDrawerEdges = []; + + const edgeAnimations = {}; + + function animateEdgeWidth(edgeId, targetWidth, duration) { + if (edgeAnimations[edgeId]) { + cancelAnimationFrame(edgeAnimations[edgeId].frameId); + } + + const edge = edges.get(edgeId); + if (!edge) return; + + const startWidth = edge.width || 2; + const startTime = performance.now(); + + function animate() { + const currentTime = performance.now(); + const elapsed = 
currentTime - startTime; + const progress = Math.min(elapsed / duration, 1); + const eased = 1 - Math.pow(1 - progress, 3); + const currentWidth = startWidth + (targetWidth - startWidth) * eased; + + edges.update({ + id: edgeId, + width: currentWidth + }); + + if (progress < 1) { + edgeAnimations[edgeId] = { + frameId: requestAnimationFrame(animate) + }; + } else { + delete edgeAnimations[edgeId]; + } + } + + animate(); + } + + network.on('hoverNode', function (params) { + const nodeId = params.node; + hoveredNodeId = nodeId; + document.body.style.cursor = 'pointer'; + network.redraw(); + }); + + network.on('blurNode', function (params) { + const nodeId = params.node; + if (hoveredNodeId === nodeId) { + hoveredNodeId = null; + } + document.body.style.cursor = 'default'; + network.redraw(); + }); + + let pressedEdges = []; + + network.on('selectNode', function (params) { + if (params.nodes.length > 0) { + const nodeId = params.nodes[0]; + pressedNodeId = nodeId; + + const connectedEdges = network.getConnectedEdges(nodeId); + pressedEdges = connectedEdges; + + network.redraw(); + } + }); + + network.on('deselectNode', function (params) { + if (pressedNodeId) { + const nodeId = pressedNodeId; + + setTimeout(function () { + if (isClicking) { + isClicking = false; + pressedNodeId = null; + pressedEdges = []; + return; + } + + pressedNodeId = null; + + pressedEdges.forEach(function (edgeId) { + if (!activeDrawerEdges.includes(edgeId)) { + animateEdgeWidth(edgeId, 2, 150); + } + }); + pressedEdges = []; + network.redraw(); + }, 10); + } + }); + let highlightedNodeId = null; + let highlightedSourceNodeId = null; + let highlightedEdgeId = null; + let highlightTimeout = null; + let originalNodeData = null; + let originalSourceNodeData = null; + let originalEdgeData = null; + let isAnimating = false; + + function clearHighlights() { + isAnimating = false; + + if (highlightTimeout) { + clearTimeout(highlightTimeout); + highlightTimeout = null; + } + + if (originalNodeData && originalNodeData.originalOpacities) { + originalNodeData.originalOpacities.forEach((opacity, nodeId) => { + nodes.update({ + id: nodeId, + opacity: opacity + }); + }); + } + + if (originalNodeData && originalNodeData.originalEdgesMap) { + originalNodeData.originalEdgesMap.forEach((edgeData, edgeId) => { + edges.update({ + id: edgeId, + opacity: edgeData.opacity, + width: edgeData.width, + color: edgeData.color + }); + }); + } + + if (highlightedNodeId) { + if (originalNodeData && originalNodeData.shadow) { + nodes.update({ + id: highlightedNodeId, + shadow: originalNodeData.shadow + }); + } else { + nodes.update({ + id: highlightedNodeId, + shadow: { + enabled: true, + color: 'rgba(0,0,0,0.1)', + size: 8, + x: 2, + y: 2 + } + }); + } + highlightedNodeId = null; + originalNodeData = null; + } + + if (highlightedEdgeId) { + if (originalEdgeData && originalEdgeData.shadow) { + edges.update({ + id: highlightedEdgeId, + shadow: originalEdgeData.shadow + }); + } else { + edges.update({ + id: highlightedEdgeId, + shadow: { + enabled: true, + color: 'rgba(0,0,0,0.08)', + size: 4, + x: 1, + y: 1 + } + }); + } + highlightedEdgeId = null; + originalEdgeData = null; + } + + if (highlightedSourceNodeId) { + if (originalSourceNodeData && originalSourceNodeData.shadow) { + nodes.update({ + id: highlightedSourceNodeId, + shadow: originalSourceNodeData.shadow + }); + } else { + nodes.update({ + id: highlightedSourceNodeId, + shadow: null + }); + } + highlightedSourceNodeId = null; + originalSourceNodeData = null; + } + } + + + function 
highlightPython(code) { + return Prism.highlight(code, Prism.languages.python, 'python'); + } + + function highlightJson(jsonString) { + let escaped = jsonString + .replace(/&/g, '&') + .replace(//g, '>'); + + let result = escaped + .replace(/(")([^&]+?)(")(\\s*)(:)/g, '$1$2$3$4$5') + .replace(/(:)(\\s*)(")([^&]*?)(")/g, '$1$2$3$4$5') + .replace(/(:)(\\s*)([-+]?\\d+\\.?\\d*)/g, '$1$2$3') + .replace(/:\\s*(true|false)\\b/g, ': $1') + .replace(/:\\s*null\\b/g, ': null') + .replace(/([{\\[\\]}])/g, '$1') + .replace(/,/g, ','); + + return result; + } + + network.on('click', function (params) { + if (params.nodes.length > 0) { + const nodeId = params.nodes[0]; + const node = nodes.get(nodeId); + const nodeData = '{{ nodeData }}'; + const metadata = nodeData[nodeId]; + + isClicking = true; + + clearTriggeredByHighlight(); + if (activeDrawerNodeId && activeDrawerNodeId !== nodeId) { + activeDrawerEdges.forEach(function (edgeId) { + animateEdgeWidth(edgeId, 2, 200); + }); + } + + activeDrawerNodeId = nodeId; + const connectedEdges = network.getConnectedEdges(nodeId); + activeDrawerEdges = connectedEdges; + + setTimeout(function () { + activeDrawerEdges.forEach(function (edgeId) { + animateEdgeWidth(edgeId, 5, 200); + }); + + network.redraw(); + }, 15); + + openDrawer(nodeId, metadata); + clearHighlights(); + } else if (params.edges.length === 0) { + clearHighlights(); + closeDrawer(); + } + }); + + function openDrawer(nodeName, metadata) { + const drawer = document.getElementById('drawer'); + const overlay = document.getElementById('drawer-overlay'); + const drawerTitle = document.getElementById('drawer-node-name'); + const drawerContent = document.getElementById('drawer-content'); + const openIdeButton = document.getElementById('drawer-open-ide'); + + drawerTitle.textContent = nodeName; + if (metadata.source_file && metadata.source_start_line) { + openIdeButton.style.display = 'flex'; + openIdeButton.onclick = function () { + const filePath = metadata.source_file; + const lineNum = metadata.source_start_line; + + function detectIDE() { + const savedIDE = localStorage.getItem('preferred_ide'); + if (savedIDE) return savedIDE; + + if (navigator.userAgent.includes('JetBrains')) return 'jetbrains'; + + return 'auto'; + } + + const detectedIDE = detectIDE(); + let ideUrl; + + if (detectedIDE === 'pycharm' || detectedIDE === 'auto') { + ideUrl = `pycharm://open?file=${filePath}&line=${lineNum}`; + } else if (detectedIDE === 'vscode') { + ideUrl = `vscode://file/${filePath}:${lineNum}`; + } else if (detectedIDE === 'jetbrains') { + ideUrl = `jetbrains://open?file=${encodeURIComponent(filePath)}&line=${lineNum}`; + } else { + ideUrl = `pycharm://open?file=${filePath}&line=${lineNum}`; + } + const link = document.createElement('a'); + link.href = ideUrl; + link.target = '_blank'; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + const fallbackText = `${filePath}:${lineNum}`; + navigator.clipboard.writeText(fallbackText).catch(function (err) { + console.error('Failed to copy:', err); + }); + }; + } else { + openIdeButton.style.display = 'none'; + } + + let content = ''; + + let metadataContent = ''; + const nodeType = metadata.type || 'unknown'; + const typeBadgeColor = nodeType === 'start' || nodeType === 'router' ? '{{ CREWAI_ORANGE }}' : '{{ DARK_GRAY }}'; + metadataContent += ` +
+
Type
+ ${nodeType} +
+ `; + + if (metadata.condition_type) { + let conditionColor, conditionBg; + if (metadata.condition_type === 'AND') { + conditionColor = '{{ CREWAI_ORANGE }}'; + conditionBg = 'rgba(255,90,80,0.12)'; + } else if (metadata.condition_type === 'IF') { + conditionColor = '{{ CREWAI_ORANGE }}'; + conditionBg = 'rgba(255,90,80,0.18)'; + } else { + conditionColor = '{{ GRAY }}'; + conditionBg = 'rgba(102,102,102,0.12)'; + } + metadataContent += ` +
+
Condition
+ ${metadata.condition_type} +
+ `; + } + + if (metadata.trigger_methods && metadata.trigger_methods.length > 0) { + metadataContent += ` +
+
Triggered By
+ +
+ `; + } + + if (metadata.router_paths && metadata.router_paths.length > 0) { + metadataContent += ` +
+
Router Paths
+ +
+ `; + } + + if (metadataContent) { + content += `
${metadataContent}
`; + } + + if (metadata.source_code) { + let lines = metadata.source_lines || metadata.source_code.split('\n'); + if (metadata.source_lines) { + lines = lines.map(function(line) { return line.replace(/\n$/, ''); }); + } + + let minIndent = Infinity; + lines.forEach(function (line) { + if (line.trim().length > 0) { + const match = line.match(/^\s*/); + const indent = match ? match[0].length : 0; + minIndent = Math.min(minIndent, indent); + } + }); + + const dedentedLines = lines.map(function (line) { + if (line.trim().length === 0) return ''; + return line.substring(minIndent); + }); + + const startLine = metadata.source_start_line || 1; + + const codeToHighlight = dedentedLines.join('\n').trim(); + + const highlightedCode = highlightPython(codeToHighlight); + + const highlightedLines = highlightedCode.split('\n'); + const numberedLines = highlightedLines.map(function (line, index) { + const lineNum = startLine + index; + return '
' + lineNum + '' + line + '
'; + }).join(''); + + const titleText = 'Source Code'; + + let classSection = ''; + if (metadata.class_signature) { + const highlightedClass = highlightPython(metadata.class_signature); + + let highlightedClassSignature = highlightedClass; + if (metadata.class_line_number) { + highlightedClassSignature = '' + metadata.class_line_number + '' + highlightedClass; + } + + classSection = ` +
+
Class
+
+
+
${highlightedClassSignature}
+
+
+
+ `; + } + + content += ` +
+
+
${titleText}
+ + + +
+
+
+
+ +
${numberedLines}
+
+
+ ${classSection} +
+
+ `; + } + + drawerContent.innerHTML = content; + + const copyButtons = drawerContent.querySelectorAll('.code-copy-button'); + copyButtons.forEach(function (button) { + button.addEventListener('click', function () { + const codeText = button.getAttribute('data-code'); + + navigator.clipboard.writeText(codeText).then(function () { + button.classList.add('copied'); + setTimeout(function () { + button.classList.remove('copied'); + }, 2000); + }).catch(function (err) { + console.error('Failed to copy:', err); + }); + }); + }); + + const accordionHeaders = drawerContent.querySelectorAll('.accordion-header'); + accordionHeaders.forEach(function (header) { + header.addEventListener('click', function () { + const accordionSection = header.closest('.accordion-section'); + + accordionSection.classList.toggle('expanded'); + }); + }); + + const triggerLinks = drawerContent.querySelectorAll('.drawer-code-link'); + triggerLinks.forEach(function (link) { + link.addEventListener('click', function () { + const targetNodeId = link.getAttribute('data-node-id'); + const currentNodeId = nodeName; + + if (targetNodeId) { + if (nodeRestoreAnimationId) { + cancelAnimationFrame(nodeRestoreAnimationId); + nodeRestoreAnimationId = null; + } + if (edgeRestoreAnimationId) { + cancelAnimationFrame(edgeRestoreAnimationId); + edgeRestoreAnimationId = null; + } + + if (isAnimating) { + clearHighlights(); + } + + const allCurrentEdges = edges.get(); + allCurrentEdges.forEach(edge => { + if (activeDrawerEdges.includes(edge.id)) { + return; + } + edges.update({ + id: edge.id, + opacity: 1.0, + width: 2 + }); + }); + + let edge = edges.get().find(function (e) { + return e.from === targetNodeId && e.to === currentNodeId; + }); + + let isForwardAnimation = false; + if (!edge) { + edge = edges.get().find(function (e) { + return e.from === currentNodeId && e.to === targetNodeId; + }); + isForwardAnimation = true; + } + + let actualTargetNodeId = targetNodeId; + let intermediateNodeId = null; + const targetNode = nodes.get(targetNodeId); + + if (!targetNode) { + const allNodesData = '{{ nodeData }}'; + + let routerNodeId = null; + for (const nodeId in allNodesData) { + const nodeMetadata = allNodesData[nodeId]; + if (nodeMetadata.router_paths && nodeMetadata.router_paths.includes(targetNodeId)) { + routerNodeId = nodeId; + break; + } + } + + if (routerNodeId) { + const allEdges = edges.get(); + edge = allEdges.find(function (e) { + return e.from === routerNodeId && e.to === currentNodeId; + }); + + if (edge) { + actualTargetNodeId = routerNodeId; + isForwardAnimation = false; + } else { + + const listenersOfPath = []; + for (const nodeId in allNodesData) { + const nodeMetadata = allNodesData[nodeId]; + if (nodeId !== currentNodeId && nodeMetadata.trigger_methods && nodeMetadata.trigger_methods.includes(targetNodeId)) { + listenersOfPath.push(nodeId); + } + } + + for (let i = 0; i < listenersOfPath.length; i++) { + const listenerNodeId = listenersOfPath[i]; + const edgeToCurrentNode = allEdges.find(function (e) { + return e.from === listenerNodeId && e.to === currentNodeId; + }); + + if (edgeToCurrentNode) { + actualTargetNodeId = routerNodeId; + intermediateNodeId = listenerNodeId; + isForwardAnimation = true; + edge = allEdges.find(function (e) { + return e.from === routerNodeId && e.to === listenerNodeId; + }); + break; + } + } + } + } + + if (!edge) { + for (const nodeId in allNodesData) { + const nodeMetadata = allNodesData[nodeId]; + if (nodeMetadata.trigger_methods && nodeMetadata.trigger_methods.includes(targetNodeId)) { + 
actualTargetNodeId = nodeId; + + const allEdges = edges.get(); + edge = allEdges.find(function (e) { + return e.from === currentNodeId && e.to === actualTargetNodeId; + }); + + break; + } + } + } + } + + let nodesToHide = []; + let edgesToHide = []; + let edgesToRestore = []; + + const nodeData = nodes.get(actualTargetNodeId); + if (nodeData) { + let animationSourceId, animationTargetId; + if (intermediateNodeId) { + animationSourceId = actualTargetNodeId; + animationTargetId = intermediateNodeId; + } else { + animationSourceId = isForwardAnimation ? currentNodeId : actualTargetNodeId; + animationTargetId = isForwardAnimation ? actualTargetNodeId : currentNodeId; + } + + const allNodes = nodes.get(); + const originalNodeOpacities = new Map(); + + const activeNodeIds = [animationSourceId, animationTargetId]; + if (intermediateNodeId) { + activeNodeIds.push(intermediateNodeId); + } + if (currentNodeId) { + activeNodeIds.push(currentNodeId); + } + + allNodes.forEach(node => { + originalNodeOpacities.set(node.id, node.opacity !== undefined ? node.opacity : 1); + if (activeNodeIds.includes(node.id)) { + nodes.update({ + id: node.id, + opacity: 1.0 + }); + } else { + nodes.update({ + id: node.id, + opacity: 0.2 + }); + } + }); + + const allEdges = edges.get(); + const originalEdgesMap = new Map(); + allEdges.forEach(edge => { + originalEdgesMap.set(edge.id, { + opacity: edge.opacity !== undefined ? edge.opacity : 1.0, + width: edge.width || 2, + color: edge.color + }); + edges.update({ + id: edge.id, + opacity: 0.2 + }); + }); + + const sourceNodeData = nodes.get(animationSourceId); + const targetNodeData = nodes.get(animationTargetId); + const sourceOriginalShadow = sourceNodeData ? sourceNodeData.shadow : null; + + originalNodeData = { + shadow: targetNodeData ? targetNodeData.shadow : null, + opacity: targetNodeData ? targetNodeData.opacity : 1, + originalOpacities: originalNodeOpacities, + originalEdgesMap: originalEdgesMap + }; + originalSourceNodeData = { + shadow: sourceOriginalShadow + }; + highlightedNodeId = animationTargetId; + highlightedSourceNodeId = animationSourceId; + isAnimating = true; + + const phase1Duration = 150; + const phase1Start = Date.now(); + + function animateSourcePulse() { + if (!isAnimating) { + return; + } + + const elapsed = Date.now() - phase1Start; + const progress = Math.min(elapsed / phase1Duration, 1); + + const eased = progress < 0.5 + ? 
2 * progress * progress + : 1 - Math.pow(-2 * progress + 2, 2) / 2; + + const pulseSize = eased * 15; + + nodes.update({ + id: animationSourceId, + shadow: { + enabled: true, + color: '{{ CREWAI_ORANGE }}', + size: pulseSize, + x: 0, + y: 0 + } + }); + + if (progress < 1 && isAnimating) { + requestAnimationFrame(animateSourcePulse); + } else { + startPhase2(); + } + } + + function startPhase2() { + const phase2Duration = 400; + const phase2Start = Date.now(); + + if (edge) { + originalEdgeData = { + shadow: edge.shadow, + level: edge.level + }; + highlightedEdgeId = edge.id; + } + + let secondHopEdge = null; + if (intermediateNodeId && currentNodeId) { + const allEdges = edges.get(); + secondHopEdge = allEdges.find(function (e) { + return e.from === intermediateNodeId && e.to === currentNodeId; + }); + } + + function animateTravel() { + if (!isAnimating) return; + + const elapsed = Date.now() - phase2Start; + const progress = Math.min(elapsed / phase2Duration, 1); + + const eased = 1 - Math.pow(1 - progress, 3); + + if (edge) { + const edgeGlowSize = eased * 15; + const edgeWidth = 2 + (eased * 6); + edges.update({ + id: edge.id, + width: edgeWidth, + opacity: 1.0, + color: { + color: '{{ CREWAI_ORANGE }}', + highlight: '{{ CREWAI_ORANGE }}' + }, + shadow: { + enabled: true, + color: '{{ CREWAI_ORANGE }}', + size: edgeGlowSize, + x: 0, + y: 0 + } + }); + } + + if (secondHopEdge && progress > 0.5) { + const secondHopProgress = (progress - 0.5) / 0.5; + const secondHopEased = 1 - Math.pow(1 - secondHopProgress, 3); + const secondEdgeGlowSize = secondHopEased * 15; + const secondEdgeWidth = 2 + (secondHopEased * 6); + + edges.update({ + id: secondHopEdge.id, + width: secondEdgeWidth, + opacity: 1.0, + color: { + color: '{{ CREWAI_ORANGE }}', + highlight: '{{ CREWAI_ORANGE }}' + }, + shadow: { + enabled: true, + color: '{{ CREWAI_ORANGE }}', + size: secondEdgeGlowSize, + x: 0, + y: 0 + } + }); + } + + if (progress > 0.3) { + const nodeProgress = (progress - 0.3) / 0.7; + const nodeEased = 1 - Math.pow(1 - nodeProgress, 3); + const glowSize = nodeEased * 25; + + nodes.update({ + id: animationTargetId, + shadow: { + enabled: true, + color: '{{ CREWAI_ORANGE }}', + size: glowSize, + x: 0, + y: 0 + } + }); + } + + if (progress < 1 && isAnimating) { + requestAnimationFrame(animateTravel); + } else { + nodes.update({ + id: animationSourceId, + shadow: null + }); + nodes.update({ + id: animationTargetId, + shadow: null + }); + } + } + + animateTravel(); + } + + animateSourcePulse(); + } else if (edge) { + clearHighlights(); + + originalEdgeData = { + shadow: edge.shadow + }; + highlightedEdgeId = edge.id; + isAnimating = true; + + const animationDuration = 300; + const startTime = Date.now(); + + function animateEdgeGlow() { + if (!isAnimating) return; + + const elapsed = Date.now() - startTime; + const progress = Math.min(elapsed / animationDuration, 1); + const eased = 1 - Math.pow(1 - progress, 3); + const edgeGlowSize = eased * 15; + + edges.update({ + id: edge.id, + shadow: { + enabled: true, + color: '{{ CREWAI_ORANGE }}', + size: edgeGlowSize, + x: 0, + y: 0 + } + }); + + if (progress < 1 && isAnimating) { + requestAnimationFrame(animateEdgeGlow); + } + } + + animateEdgeGlow(); + } + + } + }); + }); + + const triggeredByLinks = drawerContent.querySelectorAll('.drawer-code-link[data-node-id]'); + triggeredByLinks.forEach(function (link) { + link.addEventListener('click', function (e) { + e.preventDefault(); + e.stopPropagation(); + const triggerNodeId = this.getAttribute('data-node-id'); + 
highlightTriggeredBy(triggerNodeId); + }); + }); + + drawer.style.visibility = 'visible'; + + const wasAlreadyOpen = drawer.classList.contains('open'); + requestAnimationFrame(function () { + drawer.classList.add('open'); + overlay.classList.add('visible'); + document.querySelector('.nav-controls').classList.add('drawer-open'); + + if (!wasAlreadyOpen) { + setTimeout(function () { + const currentScale = network.getScale(); + const currentPosition = network.getViewPosition(); + + const drawerWidth = 400; + const offsetX = (drawerWidth * 0.3) / currentScale; + network.moveTo({ + position: { + x: currentPosition.x + offsetX, + y: currentPosition.y + }, + scale: currentScale, + animation: { + duration: 300, + easingFunction: 'easeInOutQuad' + } + }); + }, 50); // Small delay to let drawer animation start + } + }); + } + + function closeDrawer() { + const drawer = document.getElementById('drawer'); + const overlay = document.getElementById('drawer-overlay'); + drawer.classList.remove('open'); + overlay.classList.remove('visible'); + document.querySelector('.nav-controls').classList.remove('drawer-open'); + + clearTriggeredByHighlight(); + if (activeDrawerNodeId) { + activeDrawerEdges.forEach(function (edgeId) { + animateEdgeWidth(edgeId, 2, 200); + }); + activeDrawerNodeId = null; + activeDrawerEdges = []; + + network.redraw(); + } + setTimeout(function () { + network.fit({ + animation: { + duration: 300, + easingFunction: 'easeInOutQuad' + } + }); + }, 50); + + setTimeout(function () { + if (!drawer.classList.contains('open')) { + drawer.style.visibility = 'hidden'; + } + }, 300); + } + + document.getElementById('drawer-overlay').addEventListener('click', function () { + closeDrawer(); + clearHighlights(); + }); + document.getElementById('drawer-close').addEventListener('click', closeDrawer); + + document.addEventListener('keydown', function (e) { + if (e.key === 'Escape') { + closeDrawer(); + } + }); + + network.once('stabilizationIterationsDone', function () { + network.fit(); + }); + + + document.getElementById('zoom-in').addEventListener('click', function () { + const scale = network.getScale(); + network.moveTo({ + scale: scale * 1.2, + animation: { + duration: 200, + easingFunction: 'easeInOutQuad' + } + }); + }); + + document.getElementById('zoom-out').addEventListener('click', function () { + const scale = network.getScale(); + network.moveTo({ + scale: scale * 0.8, + animation: { + duration: 200, + easingFunction: 'easeInOutQuad' + } + }); + }); + + document.getElementById('fit').addEventListener('click', function () { + network.fit({ + animation: { + duration: 300, + easingFunction: 'easeInOutQuad' + } + }); + }); + + document.getElementById('export-png').addEventListener('click', function () { + const script = document.createElement('script'); + script.src = 'https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js'; + script.onload = function () { + html2canvas(document.getElementById('network-container')).then(function (canvas) { + const link = document.createElement('a'); + link.download = 'flow_dag.png'; + link.href = canvas.toDataURL(); + link.click(); + }); + }; + document.head.appendChild(script); + }); + + document.getElementById('export-pdf').addEventListener('click', function () { + const script1 = document.createElement('script'); + script1.src = 'https://cdnjs.cloudflare.com/ajax/libs/html2canvas/1.4.1/html2canvas.min.js'; + script1.onload = function () { + const script2 = document.createElement('script'); + script2.src = 
'https://cdnjs.cloudflare.com/ajax/libs/jspdf/2.5.1/jspdf.umd.min.js'; + script2.onload = function () { + html2canvas(document.getElementById('network-container')).then(function (canvas) { + const imgData = canvas.toDataURL('image/png'); + const {jsPDF} = window.jspdf; + const pdf = new jsPDF({ + orientation: canvas.width > canvas.height ? 'landscape' : 'portrait', + unit: 'px', + format: [canvas.width, canvas.height] + }); + pdf.addImage(imgData, 'PNG', 0, 0, canvas.width, canvas.height); + pdf.save('flow_dag.pdf'); + }); + }; + document.head.appendChild(script2); + }; + document.head.appendChild(script1); + }); + + document.getElementById('export-json').addEventListener('click', function () { + const dagData = '{{ dagData }}'; + const dataStr = JSON.stringify(dagData, null, 2); + const blob = new Blob([dataStr], {type: 'application/json'}); + const url = URL.createObjectURL(blob); + const link = document.createElement('a'); + link.download = 'flow_dag.json'; + link.href = url; + link.click(); + URL.revokeObjectURL(url); + }); + + const themeToggle = document.getElementById('theme-toggle'); + const htmlElement = document.documentElement; + + function getCSSVariable(name) { + return getComputedStyle(htmlElement).getPropertyValue(name).trim(); + } + + function updateEdgeLabelColors() { + edges.forEach(function (edge) { + edges.update({ + id: edge.id, + font: { + color: 'transparent', + background: 'transparent' + } + }); + }); + } + + const savedTheme = localStorage.getItem('theme') || 'light'; + if (savedTheme === 'dark') { + htmlElement.setAttribute('data-theme', 'dark'); + themeToggle.textContent = '☀️'; + themeToggle.title = 'Toggle Light Mode'; + setTimeout(updateEdgeLabelColors, 0); + } + + themeToggle.addEventListener('click', function () { + const currentTheme = htmlElement.getAttribute('data-theme'); + const newTheme = currentTheme === 'dark' ? 'light' : 'dark'; + + if (newTheme === 'dark') { + htmlElement.setAttribute('data-theme', 'dark'); + themeToggle.textContent = '☀️'; + themeToggle.title = 'Toggle Light Mode'; + } else { + htmlElement.removeAttribute('data-theme'); + themeToggle.textContent = '🌙'; + themeToggle.title = 'Toggle Dark Mode'; + } + + localStorage.setItem('theme', newTheme); + setTimeout(updateEdgeLabelColors, 50); + }); + } catch (e) { + console.error(e); + } +})() diff --git a/lib/crewai/src/crewai/flow/visualization/assets/interactive_flow.html.j2 b/lib/crewai/src/crewai/flow/visualization/assets/interactive_flow.html.j2 new file mode 100644 index 000000000..dcec5e2c4 --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/assets/interactive_flow.html.j2 @@ -0,0 +1,115 @@ + + + + CrewAI Flow Visualization + + + + + + + + + + +
+ + + + + + + +
+
+ CrewAI Logo +
+

Flow Execution

+
+

Nodes: '{{ dag_nodes_count }}'

+

Edges: '{{ dag_edges_count }}'

+

Topological Paths: '{{ execution_paths }}'

+
+
+
Node Types
+
+
+ Start Methods +
+
+
+ Router Methods +
+
+
+ Listen Methods +
+
+
+
Edge Types
+
+ + + + Router Paths +
+
+ + + + OR Conditions +
+
+ + + + AND Conditions +
+
+
+ Interactions:
+ • Drag to pan
+ • Scroll to zoom

+ IDE: + +
+
+ + + + +
+
+
+ + diff --git a/lib/crewai/src/crewai/flow/visualization/assets/style.css b/lib/crewai/src/crewai/flow/visualization/assets/style.css new file mode 100644 index 000000000..1bb55825a --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/assets/style.css @@ -0,0 +1,849 @@ +:root { + --bg-primary: '{{ WHITE }}'; + --bg-secondary: rgba(255, 255, 255, 0.95); + --bg-drawer: rgba(255, 255, 255, 0.98); + --bg-overlay: rgba(0, 0, 0, 0.3); + --text-primary: '{{ DARK_GRAY }}'; + --text-secondary: '{{ GRAY }}'; + --border-color: #e0e0e0; + --border-subtle: rgba(102, 102, 102, 0.3); + --grid-color: rgba(102, 102, 102, 0.08); + --shadow-color: rgba(0, 0, 0, 0.1); + --shadow-strong: rgba(0, 0, 0, 0.15); + --edge-label-text: '{{ GRAY }}'; + --edge-label-bg: rgba(255, 255, 255, 0.8); +} + +[data-theme="dark"] { + --bg-primary: #0d1117; + --bg-secondary: rgba(22, 27, 34, 0.95); + --bg-drawer: rgba(22, 27, 34, 0.98); + --bg-overlay: rgba(0, 0, 0, 0.5); + --text-primary: #e6edf3; + --text-secondary: #7d8590; + --border-color: #30363d; + --border-subtle: rgba(48, 54, 61, 0.5); + --grid-color: rgba(255, 255, 255, 0.05); + --shadow-color: rgba(0, 0, 0, 0.3); + --shadow-strong: rgba(0, 0, 0, 0.5); + --edge-label-text: #c9d1d9; + --edge-label-bg: rgba(22, 27, 34, 0.9); +} + +@keyframes dash { + to { + stroke-dashoffset: -30; + } +} + +body { + margin: 0; + padding: 0; + font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', sans-serif; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + background: var(--bg-primary); + background-image: + linear-gradient(var(--grid-color) 1px, transparent 1px), + linear-gradient(90deg, var(--grid-color) 1px, transparent 1px); + background-size: 20px 20px; + background-position: -1px -1px; + transition: background-color 0.3s ease; +} +#network-container { + width: 100vw; + height: 100vh; + position: fixed; + top: 0; + left: 0; + z-index: 1999; + pointer-events: none; +} +#network { + width: 100%; + height: 100%; + border: none; + background: transparent; + pointer-events: auto; +} +#info { + position: absolute; + top: 20px; + left: 20px; + background: var(--bg-secondary); + padding: 20px; + border-radius: 8px; + box-shadow: 0 4px 12px var(--shadow-strong); + max-width: 320px; + border: 1px solid var(--border-color); + z-index: 10000; + pointer-events: auto; + transition: background 0.3s ease, border-color 0.3s ease, box-shadow 0.3s ease; +} +h3 { + margin: 0 0 15px 0; + color: var(--text-primary); + font-size: 18px; + font-weight: 600; +} +.stats { + margin-bottom: 15px; +} +.stats p { + margin: 5px 0; + color: var(--text-secondary); + font-size: 14px; + transition: color 0.3s ease; +} +.stats strong { + color: var(--text-primary); + transition: color 0.3s ease; +} +.legend { + margin-top: 15px; + padding-top: 15px; + border-top: 1px solid var(--border-color); + transition: border-color 0.3s ease; +} +.legend-title { + font-weight: 600; + color: var(--text-primary); + margin-bottom: 10px; + font-size: 14px; + transition: color 0.3s ease; +} +.legend-item { + margin: 8px 0; + display: flex; + align-items: center; +} +.legend-color { + width: 24px; + height: 24px; + margin-right: 12px; + border-radius: 3px; + box-sizing: border-box; +} +.legend-item span { + color: var(--text-secondary); + font-size: 13px; + transition: color 0.3s ease; +} +.instructions { + margin-top: 15px; + padding-top: 15px; + border-top: 1px solid var(--border-color); + font-size: 12px; + color: var(--text-secondary); + 
line-height: 1.5; + transition: color 0.3s ease, border-color 0.3s ease; +} + +#ide-selector { + pointer-events: auto; + position: relative; + z-index: 10001; + cursor: pointer; +} + +.nav-controls { + position: fixed; + top: auto; + left: 20px; + bottom: 20px; + right: auto; + display: grid; + grid-template-columns: repeat(4, 40px); + gap: 8px; + z-index: 10002; + pointer-events: auto; + max-width: 320px; +} + +.nav-controls.drawer-open { +} + +.nav-button { + width: 40px; + height: 40px; + background: var(--bg-secondary); + border: 1px solid var(--border-subtle); + border-radius: 6px; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + box-shadow: 0 2px 8px var(--shadow-color); + font-size: 18px; + color: var(--text-primary); + user-select: none; + pointer-events: auto; + position: relative; + z-index: 10001; + transition: background 0.3s ease, border-color 0.3s ease, color 0.3s ease, box-shadow 0.3s ease; +} + +.nav-button:hover { + background: var(--border-subtle); +} + +#drawer { + position: fixed; + top: 0; + right: -400px; + width: 400px; + height: 100vh; + background: var(--bg-drawer); + box-shadow: -4px 0 12px var(--shadow-strong); + transition: right 0.3s cubic-bezier(0.4, 0, 0.2, 1), background 0.3s ease, box-shadow 0.3s ease; + z-index: 2000; + overflow-y: auto; + padding: 24px; +} + +#drawer.open { + right: 0; +} + +#drawer-overlay { + position: fixed; + top: 0; + left: 0; + width: 100vw; + height: 100vh; + background: var(--bg-overlay); + opacity: 0; + pointer-events: none; + transition: opacity 0.3s cubic-bezier(0.4, 0, 0.2, 1), background 0.3s ease; + z-index: 1998; + cursor: pointer; +} + +#drawer-overlay.visible { + opacity: 1; + pointer-events: auto; +} + +#highlight-canvas { + position: fixed; + top: 0; + left: 0; + width: 100vw; + height: 100vh; + z-index: 1999; + pointer-events: none; + opacity: 0; + transition: opacity 0.3s cubic-bezier(0.4, 0, 0.2, 1); +} + +#highlight-canvas.visible { + opacity: 1; +} + +.drawer-header { + display: flex; + justify-content: space-between; + align-items: center; + margin-bottom: 20px; + padding-bottom: 16px; + border-bottom: 2px solid '{{ CREWAI_ORANGE }}'; + position: relative; + z-index: 2001; +} + +.drawer-title { + font-size: 20px; + font-weight: 700; + color: var(--text-primary); + transition: color 0.3s ease; +} + +.drawer-close { + background: none; + border: none; + font-size: 24px; + color: var(--text-secondary); + cursor: pointer; + padding: 4px 8px; + line-height: 1; + transition: color 0.3s ease; +} + +.drawer-close:hover { + color: '{{ CREWAI_ORANGE }}'; +} + +.drawer-open-ide { + background: '{{ CREWAI_ORANGE }}'; + border: none; + color: white; + cursor: pointer; + padding: 6px 12px; + border-radius: 4px; + font-size: 12px; + font-weight: 600; + transition: all 0.2s ease; + display: flex; + align-items: center; + gap: 6px; + margin-right: 12px; + position: relative; + z-index: 9999; + pointer-events: auto; +} + +.drawer-open-ide:hover { + background: #ff6b61; + transform: translateY(-1px); + box-shadow: 0 2px 8px rgba(255, 90, 80, 0.3); +} + +.drawer-open-ide:active { + transform: translateY(0); + box-shadow: 0 1px 4px rgba(255, 90, 80, 0.2); +} + +.drawer-open-ide svg { + width: 14px; + height: 14px; +} + +.drawer-content { + color: '{{ DARK_GRAY }}'; + line-height: 1.6; +} + +.drawer-section { + margin-bottom: 20px; +} + +.drawer-metadata-grid { + display: grid; + grid-template-columns: 1fr 1fr; + grid-template-rows: 1fr 1fr; + gap: 0; + margin-bottom: 20px; + position: relative; 
+} + +.drawer-metadata-grid::before { + content: ''; + position: absolute; + left: 50%; + top: 0; + bottom: 0; + width: 1px; + background: rgba(102,102,102,0.15); + z-index: 1; +} + +.drawer-metadata-grid::after { + content: ''; + position: absolute; + left: 0; + right: 0; + top: 50%; + height: 1px; + background: rgba(102,102,102,0.15); + z-index: 1; +} + +.drawer-metadata-grid .drawer-section { + padding: 16px; + position: relative; +} + +.drawer-metadata-grid .drawer-section:nth-child(4):nth-last-child(1), +.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(2), +.drawer-metadata-grid .drawer-section:nth-child(2):nth-last-child(3), +.drawer-metadata-grid .drawer-section:nth-child(1):nth-last-child(4) { + display: flex; + flex-direction: column; + align-items: center; + text-align: center; + padding-top: 8px; +} + +.drawer-metadata-grid .drawer-section:nth-child(4):nth-last-child(1) .drawer-section-title, +.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(2) .drawer-section-title, +.drawer-metadata-grid .drawer-section:nth-child(2):nth-last-child(3) .drawer-section-title, +.drawer-metadata-grid .drawer-section:nth-child(1):nth-last-child(4) .drawer-section-title { + margin-bottom: auto; +} + +.drawer-metadata-grid .drawer-section:nth-child(4):nth-last-child(1) > *:not(.drawer-section-title), +.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(2) > *:not(.drawer-section-title), +.drawer-metadata-grid .drawer-section:nth-child(2):nth-last-child(3) > *:not(.drawer-section-title), +.drawer-metadata-grid .drawer-section:nth-child(1):nth-last-child(4) > *:not(.drawer-section-title) { + margin-top: auto; + margin-bottom: auto; + align-self: center; +} + +.drawer-metadata-grid .drawer-section:nth-child(1):nth-last-child(1) { + grid-row: 1 / 3; + grid-column: 1 / 3; + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + text-align: center; +} + +.drawer-metadata-grid:has(.drawer-section:nth-child(1):nth-last-child(1))::before, +.drawer-metadata-grid:has(.drawer-section:nth-child(1):nth-last-child(1))::after { + display: none; +} + +.drawer-metadata-grid:has(.drawer-section:nth-child(2):nth-last-child(1)) { + grid-template-rows: 1fr; +} + +.drawer-metadata-grid .drawer-section:nth-child(2):nth-last-child(1), +.drawer-metadata-grid .drawer-section:nth-child(1):nth-last-child(2) { + display: flex; + flex-direction: column; + align-items: center; + text-align: center; + justify-content: center; +} + +.drawer-metadata-grid:has(.drawer-section:nth-child(2):nth-last-child(1))::after { + display: none; +} + +.drawer-metadata-grid .drawer-section:nth-child(3):nth-last-child(1) { + grid-row: 1 / 3; + grid-column: 2; + display: flex; + flex-direction: column; + justify-content: center; +} + +.drawer-metadata-grid:has(.drawer-section:nth-child(3):nth-last-child(1))::after { + right: 50%; +} + +.drawer-section-title { + font-size: 12px; + text-transform: uppercase; + color: '{{ GRAY }}'; + letter-spacing: 0.5px; + margin-bottom: 8px; + font-weight: 600; +} + +.drawer-badge { + display: inline-block; + padding: 4px 12px; + border-radius: 4px; + font-size: 11px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.drawer-list { + list-style: none; + padding: 0; + margin: 0; +} + +.drawer-list li { + padding: 6px 0; + border-bottom: 1px solid rgba(102,102,102,0.1); +} + +.drawer-list li:last-child { + border-bottom: none; +} + +.drawer-section:has(.drawer-code-link[style*="color: '{{ CREWAI_ORANGE 
}}'"]) .drawer-list li { + border-bottom: none; + padding: 3px 0; +} + +.drawer-metadata-grid .drawer-section:nth-child(3) .drawer-list li { + border-bottom: none; + padding: 3px 0; +} + +.drawer-code { + background: rgba(102,102,102,0.08); + padding: 4px 8px; + border-radius: 3px; + font-size: 12px; + font-family: monospace; + color: '{{ DARK_GRAY }}'; + border: 1px solid rgba(102,102,102,0.12); +} + +.drawer-code-link { + background: rgba(102,102,102,0.08); + padding: 4px 8px; + border-radius: 3px; + font-size: 12px; + font-family: monospace; + color: '{{ CREWAI_ORANGE }}'; + border: 1px solid rgba(255,90,80,0.2); + cursor: pointer; + transition: all 0.2s; + display: inline-block; +} + +.drawer-code-link:hover { + background: rgba(255,90,80,0.12); + border-color: '{{ CREWAI_ORANGE }}'; + transform: translateX(2px); +} + +.code-block-container { + background: transparent; + border: 1px solid #30363d; + overflow-x: auto; + position: relative; +} + +.code-copy-button { + position: absolute; + top: 6px; + right: 6px; + background: rgba(255, 255, 255, 0.1); + border: 1px solid rgba(255, 255, 255, 0.2); + color: #c9d1d9; + padding: 5px; + border-radius: 4px; + cursor: pointer; + transition: all 0.2s; + font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + z-index: 10; + width: 18px; + height: 18px; + display: flex; + align-items: center; + justify-content: center; +} + +.code-copy-button:hover { + background: rgba(255, 255, 255, 0.15); + border-color: rgba(255, 255, 255, 0.3); +} + +.code-copy-button:active { + background: rgba(255, 255, 255, 0.2); +} + +.code-copy-button.copied { + background: rgba(16, 185, 129, 0.2); + border-color: rgba(16, 185, 129, 0.4); + color: #10b981; +} + +.code-copy-button svg { + width: 10px; + height: 10px; + position: absolute; + transition: opacity 0.3s cubic-bezier(0.4, 0, 0.2, 1), transform 0.3s cubic-bezier(0.4, 0, 0.2, 1); +} + +.code-copy-button .copy-icon { + opacity: 1; + transform: scale(1) rotate(0deg); +} + +.code-copy-button .check-icon { + opacity: 0; + transform: scale(0.5) rotate(-90deg); +} + +.code-copy-button.copied .copy-icon { + opacity: 0; + transform: scale(0.5) rotate(90deg); +} + +.code-copy-button.copied .check-icon { + opacity: 1; + transform: scale(1) rotate(0deg); +} + +.code-block { + margin: 0; + padding: 0; + overflow-x: auto; +} + +.code-block .code-line { + display: block; + white-space: pre; + min-height: 1.6em; +} + +.code-block .line-number { + color: #6e7681; + display: inline-block; + padding-left: 10px; + padding-right: 8px; + margin-right: 8px; + text-align: right; + user-select: none; + border-right: 1px solid #30363d; +} + +.code-block .json-key { + color: #79c0ff; + font-weight: 400; +} + +.code-block .json-bracket { + color: #c9d1d9; + font-weight: 400; +} + +.code-block .json-colon { + color: #c9d1d9; +} + +.accordion-section { + border: 1px solid rgba(102,102,102,0.15); + border-top: 1px solid rgba(255, 255, 255, 0.1); + border-left: 1px solid rgba(255, 255, 255, 0.05); + border-radius: 6px; + margin-bottom: 12px; + overflow: hidden; + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); + background: #0d1117; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.2), 0 1px 3px rgba(0, 0, 0, 0.3); +} + +.accordion-section:hover { + border-color: rgba(102,102,102,0.25); +} + +.accordion-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 10px 10px; + cursor: pointer; + background: rgba(102,102,102,0.05); + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); + 
user-select: none; +} + +.accordion-subheader { + display: flex; + justify-content: space-between; + align-items: center; + padding: 10px 10px; + cursor: pointer; + background: #0d1117; + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); + user-select: none; + box-shadow: 0 1px 4px rgba(0, 0, 0, 0.15); + border-top: 1px solid rgba(255, 255, 255, 0.08); + border-bottom: 1px solid rgba(0, 0, 0, 0.2); +} + +.accordion-header:hover { + background: rgba(102,102,102,0.1); +} + +.accordion-header:active { + background: rgba(102,102,102,0.15); +} + +.accordion-title { + font-size: 12px; + font-weight: 400; + text-transform: uppercase; + color: white; + letter-spacing: 0.5px; + font-family: monospace; +} + +.accordion-icon { + width: 16px; + height: 16px; + transition: transform 0.3s cubic-bezier(0.4, 0, 0.2, 1); + color: '{{ GRAY }}'; +} + +.accordion-section.expanded .accordion-icon { + transform: rotate(180deg); +} + +.accordion-content { + max-height: 0; + overflow: hidden; + transition: max-height 0.3s cubic-bezier(0.4, 0, 0.2, 1), + opacity 0.3s cubic-bezier(0.4, 0, 0.2, 1); + opacity: 0; + padding: 0; +} + +.accordion-section.expanded .accordion-content { + max-height: 2000px; + opacity: 1; + padding: 0; +} + +.accordion-content .drawer-section { + margin-bottom: 0; +} + +.accordion-content .drawer-section:not(:first-child) { + margin-top: 0; +} + +.accordion-content .drawer-section-title { + display: none; +} + +.accordion-content .class-section-title { + display: block; + font-size: 12px; + text-transform: uppercase; + color: '{{ GRAY }}'; + letter-spacing: 0.5px; + margin-bottom: 8px; + font-weight: 600; +} + +/* + * Synthwave '84 Theme originally by Robb Owen [@Robb0wen] for Visual Studio Code + * Demo: https://marc.dev/demo/prism-synthwave84 + * + * Ported for PrismJS by Marc Backes [@themarcba] + */ + +code[class*="language-"], +pre[class*="language-"] { + color: #f92aad; + text-shadow: 0 0 2px #100c0f, 0 0 5px #dc078e33, 0 0 10px #fff3; + background: none; + font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace; + font-size: 0.7em; + text-align: left; + white-space: pre; + word-spacing: normal; + word-break: normal; + word-wrap: normal; + line-height: 1.25; + + -moz-tab-size: 4; + -o-tab-size: 4; + tab-size: 4; + + -webkit-hyphens: none; + -moz-hyphens: none; + -ms-hyphens: none; + hyphens: none; +} + +pre[class*="language-"] { + padding-top: 0.4em; + padding-bottom: 0.4em; + overflow: auto; +} + +:not(pre) > code[class*="language-"], +pre[class*="language-"] { + background-color: transparent !important; + background-image: linear-gradient(to bottom, #2a2139 75%, #34294f); +} + +:not(pre) > code[class*="language-"] { + padding: .1em; + border-radius: .3em; + white-space: normal; +} + +.token.comment, +.token.block-comment, +.token.prolog, +.token.doctype, +.token.cdata { + color: #8e8e8e; +} + +.token.punctuation { + color: #ccc; +} + +.token.tag, +.token.attr-name, +.token.namespace, +.token.number, +.token.unit, +.token.hexcode, +.token.deleted { + color: #e2777a; +} + +.token.property, +.token.selector { + color: #72f1b8; + text-shadow: 0 0 2px #100c0f, 0 0 10px #257c5575, 0 0 35px #21272475; +} + +.token.function-name { + color: #6196cc; +} + +.token.boolean, +.token.selector .token.id, +.token.function { + color: #fdfdfd; + text-shadow: 0 0 2px #001716, 0 0 3px #03edf975, 0 0 5px #03edf975, 0 0 8px #03edf975; +} + +.token.class-name { + color: #fff5f6; + text-shadow: 0 0 2px #000, 0 0 10px #fc1f2c75, 0 0 5px #fc1f2c75, 0 0 25px #fc1f2c75; +} + +.token.constant, 
+.token.symbol { + color: #f92aad; + text-shadow: 0 0 2px #100c0f, 0 0 5px #dc078e33, 0 0 10px #fff3; +} + +.token.important, +.token.atrule, +.token.keyword, +.token.selector .token.class, +.token.builtin { + color: #f4eee4; + text-shadow: 0 0 2px #393a33, 0 0 8px #f39f0575, 0 0 2px #f39f0575; +} + +.token.string, +.token.char, +.token.attr-value, +.token.regex, +.token.variable { + color: #f87c32; +} + +.token.operator, +.token.entity, +.token.url { + color: #67cdcc; +} + +.token.important, +.token.bold { + font-weight: bold; +} + +.token.italic { + font-style: italic; +} + +.token.entity { + cursor: help; +} + +.token.inserted { + color: green; +} diff --git a/lib/crewai/src/crewai/flow/visualization/builder.py b/lib/crewai/src/crewai/flow/visualization/builder.py new file mode 100644 index 000000000..861cbe42a --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/builder.py @@ -0,0 +1,432 @@ +"""Flow structure builder for analyzing Flow execution.""" + +from __future__ import annotations +from collections import defaultdict +import inspect +from typing import TYPE_CHECKING, Any + +from crewai.flow.constants import OR_CONDITION +from crewai.flow.types import FlowMethodName +from crewai.flow.utils import ( + _extract_all_methods_recursive, + is_flow_condition_dict, + is_simple_flow_condition, +) +from crewai.flow.visualization.schema import extract_method_signature +from crewai.flow.visualization.types import FlowStructure, NodeMetadata, StructureEdge + + +if TYPE_CHECKING: + from crewai.flow.flow import Flow + + +def _extract_direct_or_triggers( + condition: str | dict[str, Any] | list[Any], +) -> list[str]: + """Extract direct OR-level trigger strings from a condition. + + This function extracts strings that would directly trigger a listener, + meaning they appear at the top level of an OR condition. Strings nested + inside AND conditions are NOT considered direct triggers for router paths. + + For example: + - or_("a", "b") -> ["a", "b"] (both are direct triggers) + - and_("a", "b") -> [] (neither are direct triggers, both required) + - or_(and_("a", "b"), "c") -> ["c"] (only "c" is a direct trigger) + + Args: + condition: Can be a string, dict, or list. + + Returns: + List of direct OR-level trigger strings. + """ + if isinstance(condition, str): + return [condition] + if isinstance(condition, dict): + cond_type = condition.get("type", "OR") + conditions_list = condition.get("conditions", []) + + if cond_type == "OR": + strings = [] + for sub_cond in conditions_list: + strings.extend(_extract_direct_or_triggers(sub_cond)) + return strings + else: + return [] + if isinstance(condition, list): + strings = [] + for item in condition: + strings.extend(_extract_direct_or_triggers(item)) + return strings + if callable(condition) and hasattr(condition, "__name__"): + return [condition.__name__] + return [] + + +def _extract_all_trigger_names( + condition: str | dict[str, Any] | list[Any], +) -> list[str]: + """Extract ALL trigger names from a condition for display purposes. + + Unlike _extract_direct_or_triggers, this extracts ALL strings and method + names from the entire condition tree, including those nested in AND conditions. + This is used for displaying trigger information in the UI. + + For example: + - or_("a", "b") -> ["a", "b"] + - and_("a", "b") -> ["a", "b"] + - or_(and_("a", method_6), method_4) -> ["a", "method_6", "method_4"] + + Args: + condition: Can be a string, dict, or list. + + Returns: + List of all trigger names found in the condition. 
+ """ + if isinstance(condition, str): + return [condition] + if isinstance(condition, dict): + conditions_list = condition.get("conditions", []) + strings = [] + for sub_cond in conditions_list: + strings.extend(_extract_all_trigger_names(sub_cond)) + return strings + if isinstance(condition, list): + strings = [] + for item in condition: + strings.extend(_extract_all_trigger_names(item)) + return strings + if callable(condition) and hasattr(condition, "__name__"): + return [condition.__name__] + return [] + + +def build_flow_structure(flow: Flow[Any]) -> FlowStructure: + """Build a structure representation of a Flow's execution. + + Args: + flow: Flow instance to analyze. + + Returns: + Dictionary with nodes, edges, start_methods, and router_methods. + """ + nodes: dict[str, NodeMetadata] = {} + edges: list[StructureEdge] = [] + start_methods: list[str] = [] + router_methods: list[str] = [] + + for method_name, method in flow._methods.items(): + node_metadata: NodeMetadata = {"type": "listen"} + + if hasattr(method, "__is_start_method__") and method.__is_start_method__: + node_metadata["type"] = "start" + start_methods.append(method_name) + + if hasattr(method, "__is_router__") and method.__is_router__: + node_metadata["is_router"] = True + node_metadata["type"] = "router" + router_methods.append(method_name) + + node_metadata["condition_type"] = "IF" + + if method_name in flow._router_paths: + node_metadata["router_paths"] = [ + str(p) for p in flow._router_paths[method_name] + ] + + if hasattr(method, "__trigger_methods__") and method.__trigger_methods__: + node_metadata["trigger_methods"] = [ + str(m) for m in method.__trigger_methods__ + ] + + if hasattr(method, "__condition_type__") and method.__condition_type__: + if "condition_type" not in node_metadata: + node_metadata["condition_type"] = method.__condition_type__ + + if ( + hasattr(method, "__trigger_condition__") + and method.__trigger_condition__ is not None + ): + node_metadata["trigger_condition"] = method.__trigger_condition__ + + if "trigger_methods" not in node_metadata: + extracted = _extract_all_trigger_names(method.__trigger_condition__) + if extracted: + node_metadata["trigger_methods"] = extracted + + node_metadata["method_signature"] = extract_method_signature( + method, method_name + ) + + try: + source_code = inspect.getsource(method) + node_metadata["source_code"] = source_code + + try: + source_lines, start_line = inspect.getsourcelines(method) + node_metadata["source_lines"] = source_lines + node_metadata["source_start_line"] = start_line + except (OSError, TypeError): + pass + + try: + source_file = inspect.getsourcefile(method) + if source_file: + node_metadata["source_file"] = source_file + except (OSError, TypeError): + try: + class_file = inspect.getsourcefile(flow.__class__) + if class_file: + node_metadata["source_file"] = class_file + except (OSError, TypeError): + pass + except (OSError, TypeError): + pass + + try: + class_obj = flow.__class__ + + if class_obj: + class_name = class_obj.__name__ + + bases = class_obj.__bases__ + if bases: + base_strs = [] + for base in bases: + if hasattr(base, "__name__"): + if hasattr(base, "__origin__"): + base_strs.append(str(base)) + else: + base_strs.append(base.__name__) + else: + base_strs.append(str(base)) + + try: + source_lines = inspect.getsource(class_obj).split("\n") + _, class_start_line = inspect.getsourcelines(class_obj) + + for idx, line in enumerate(source_lines): + stripped = line.strip() + if stripped.startswith("class ") and class_name in stripped: 
+ class_signature = stripped.rstrip(":") + node_metadata["class_signature"] = class_signature + node_metadata["class_line_number"] = ( + class_start_line + idx + ) + break + except (OSError, TypeError): + class_signature = f"class {class_name}({', '.join(base_strs)})" + node_metadata["class_signature"] = class_signature + else: + class_signature = f"class {class_name}" + node_metadata["class_signature"] = class_signature + + node_metadata["class_name"] = class_name + except (OSError, TypeError, AttributeError): + pass + + nodes[method_name] = node_metadata + + for listener_name, condition_data in flow._listeners.items(): + condition_type: str | None = None + trigger_methods_list: list[str] = [] + + if is_simple_flow_condition(condition_data): + cond_type, methods = condition_data + condition_type = cond_type + trigger_methods_list = [str(m) for m in methods] + elif is_flow_condition_dict(condition_data): + condition_type = condition_data.get("type", OR_CONDITION) + methods_recursive = _extract_all_methods_recursive(condition_data, flow) + trigger_methods_list = [str(m) for m in methods_recursive] + + edges.extend( + StructureEdge( + source=str(trigger_method), + target=str(listener_name), + condition_type=condition_type, + is_router_path=False, + ) + for trigger_method in trigger_methods_list + if trigger_method in nodes + ) + + for router_method_name in router_methods: + if router_method_name not in flow._router_paths: + continue + + router_paths = flow._router_paths[FlowMethodName(router_method_name)] + + for path in router_paths: + for listener_name, condition_data in flow._listeners.items(): + trigger_strings_from_cond: list[str] = [] + + if is_simple_flow_condition(condition_data): + _, methods = condition_data + trigger_strings_from_cond = [str(m) for m in methods] + elif is_flow_condition_dict(condition_data): + trigger_strings_from_cond = _extract_direct_or_triggers( + condition_data + ) + + if str(path) in trigger_strings_from_cond: + edges.append( + StructureEdge( + source=router_method_name, + target=str(listener_name), + condition_type=None, + is_router_path=True, + ) + ) + + for start_method in flow._start_methods: + if start_method not in nodes and start_method in flow._methods: + method = flow._methods[start_method] + nodes[str(start_method)] = NodeMetadata(type="start") + + if hasattr(method, "__trigger_methods__") and method.__trigger_methods__: + nodes[str(start_method)]["trigger_methods"] = [ + str(m) for m in method.__trigger_methods__ + ] + if hasattr(method, "__condition_type__") and method.__condition_type__: + nodes[str(start_method)]["condition_type"] = method.__condition_type__ + + return FlowStructure( + nodes=nodes, + edges=edges, + start_methods=start_methods, + router_methods=router_methods, + ) + + +def structure_to_dict(structure: FlowStructure) -> dict[str, Any]: + """Convert FlowStructure to plain dictionary for serialization. + + Args: + structure: FlowStructure to convert. + + Returns: + Plain dictionary representation. + """ + return { + "nodes": dict(structure["nodes"]), + "edges": list(structure["edges"]), + "start_methods": list(structure["start_methods"]), + "router_methods": list(structure["router_methods"]), + } + + +def print_structure_summary(structure: FlowStructure) -> str: + """Generate human-readable summary of Flow structure. + + Args: + structure: FlowStructure to summarize. + + Returns: + Formatted string summary. 
+ """ + lines: list[str] = [] + lines.append("Flow Execution Structure") + lines.append("=" * 50) + lines.append(f"Total nodes: {len(structure['nodes'])}") + lines.append(f"Total edges: {len(structure['edges'])}") + lines.append(f"Start methods: {len(structure['start_methods'])}") + lines.append(f"Router methods: {len(structure['router_methods'])}") + lines.append("") + + if structure["start_methods"]: + lines.append("Start Methods:") + for method_name in structure["start_methods"]: + node = structure["nodes"][method_name] + lines.append(f" - {method_name}") + if node.get("condition_type"): + lines.append(f" Condition: {node['condition_type']}") + if node.get("trigger_methods"): + lines.append(f" Triggers on: {', '.join(node['trigger_methods'])}") + lines.append("") + + if structure["router_methods"]: + lines.append("Router Methods:") + for method_name in structure["router_methods"]: + node = structure["nodes"][method_name] + lines.append(f" - {method_name}") + if node.get("router_paths"): + lines.append(f" Paths: {', '.join(node['router_paths'])}") + lines.append("") + + if structure["edges"]: + lines.append("Connections:") + for edge in structure["edges"]: + edge_type = "" + if edge["is_router_path"]: + edge_type = " [Router Path]" + elif edge["condition_type"]: + edge_type = f" [{edge['condition_type']}]" + + lines.append(f" {edge['source']} -> {edge['target']}{edge_type}") + lines.append("") + + return "\n".join(lines) + + +def calculate_execution_paths(structure: FlowStructure) -> int: + """Calculate number of possible execution paths through the flow. + + Args: + structure: FlowStructure to analyze. + + Returns: + Number of possible execution paths. + """ + graph = defaultdict(list) + for edge in structure["edges"]: + graph[edge["source"]].append( + { + "target": edge["target"], + "is_router": edge["is_router_path"], + "condition": edge["condition_type"], + } + ) + + all_nodes = set(structure["nodes"].keys()) + nodes_with_outgoing = set(edge["source"] for edge in structure["edges"]) + terminal_nodes = all_nodes - nodes_with_outgoing + + if not structure["start_methods"] or not terminal_nodes: + return 0 + + def count_paths_from(node: str, visited: set[str]) -> int: + if node in terminal_nodes: + return 1 + + if node in visited: + return 0 + + visited.add(node) + + outgoing = graph[node] + if not outgoing: + visited.remove(node) + return 1 + + if node in structure["router_methods"]: + total = 0 + for edge_info in outgoing: + target = str(edge_info["target"]) + total += count_paths_from(target, visited.copy()) + visited.remove(node) + return total + + total = 0 + for edge_info in outgoing: + target = str(edge_info["target"]) + total += count_paths_from(target, visited.copy()) + + visited.remove(node) + return total if total > 0 else 1 + + total_paths = 0 + for start in structure["start_methods"]: + total_paths += count_paths_from(start, set()) + + return max(total_paths, 1) diff --git a/lib/crewai/src/crewai/flow/visualization/renderers/__init__.py b/lib/crewai/src/crewai/flow/visualization/renderers/__init__.py new file mode 100644 index 000000000..fede74ee5 --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/renderers/__init__.py @@ -0,0 +1,8 @@ +"""Flow structure visualization renderers.""" + +from crewai.flow.visualization.renderers.interactive import render_interactive + + +__all__ = [ + "render_interactive", +] diff --git a/lib/crewai/src/crewai/flow/visualization/renderers/interactive.py b/lib/crewai/src/crewai/flow/visualization/renderers/interactive.py new file mode 
100644 index 000000000..6ce0c0fc7 --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/renderers/interactive.py @@ -0,0 +1,343 @@ +"""Interactive HTML renderer for Flow structure visualization.""" + +import json +from pathlib import Path +import tempfile +from typing import Any, ClassVar +import webbrowser + +from jinja2 import Environment, FileSystemLoader, nodes, select_autoescape +from jinja2.ext import Extension +from jinja2.parser import Parser + +from crewai.flow.visualization.builder import calculate_execution_paths +from crewai.flow.visualization.types import FlowStructure + + +class CSSExtension(Extension): + """Jinja2 extension for rendering CSS link tags. + + Provides {% css 'path/to/file.css' %} tag syntax. + """ + + tags: ClassVar[set[str]] = {"css"} # type: ignore[assignment] + + def parse(self, parser: Parser) -> nodes.Node: + """Parse {% css 'styles.css' %} tag. + + Args: + parser: Jinja2 parser instance. + + Returns: + Output node with rendered CSS link tag. + """ + lineno: int = next(parser.stream).lineno + args: list[nodes.Expr] = [parser.parse_expression()] + return nodes.Output([self.call_method("_render_css", args)]).set_lineno(lineno) + + def _render_css(self, href: str) -> str: + """Render CSS link tag. + + Args: + href: Path to CSS file. + + Returns: + HTML link tag string. + """ + return f'' + + +class JSExtension(Extension): + """Jinja2 extension for rendering script tags. + + Provides {% js 'path/to/file.js' %} tag syntax. + """ + + tags: ClassVar[set[str]] = {"js"} # type: ignore[assignment] + + def parse(self, parser: Parser) -> nodes.Node: + """Parse {% js 'script.js' %} tag. + + Args: + parser: Jinja2 parser instance. + + Returns: + Output node with rendered script tag. + """ + lineno: int = next(parser.stream).lineno + args: list[nodes.Expr] = [parser.parse_expression()] + return nodes.Output([self.call_method("_render_js", args)]).set_lineno(lineno) + + def _render_js(self, src: str) -> str: + """Render script tag. + + Args: + src: Path to JavaScript file. + + Returns: + HTML script tag string. + """ + return f'' + + +CREWAI_ORANGE = "#FF5A50" +DARK_GRAY = "#333333" +WHITE = "#FFFFFF" +GRAY = "#666666" +BG_DARK = "#0d1117" +BG_CARD = "#161b22" +BORDER_SUBTLE = "#30363d" +TEXT_PRIMARY = "#e6edf3" +TEXT_SECONDARY = "#7d8590" + + +def render_interactive( + dag: FlowStructure, + filename: str = "flow_dag.html", + show: bool = True, +) -> str: + """Create interactive HTML visualization of Flow structure. + + Generates three output files in a temporary directory: HTML template, + CSS stylesheet, and JavaScript. Optionally opens the visualization in + default browser. + + Args: + dag: FlowStructure to visualize. + filename: Output HTML filename (basename only, no path). + show: Whether to open in browser. + + Returns: + Absolute path to generated HTML file in temporary directory. 
+ """ + nodes_list: list[dict[str, Any]] = [] + for name, metadata in dag["nodes"].items(): + node_type: str = metadata.get("type", "listen") + + color_config: dict[str, Any] + font_color: str + border_width: int + + if node_type == "start": + color_config = { + "background": CREWAI_ORANGE, + "border": CREWAI_ORANGE, + "highlight": { + "background": CREWAI_ORANGE, + "border": CREWAI_ORANGE, + }, + } + font_color = WHITE + border_width = 2 + elif node_type == "router": + color_config = { + "background": DARK_GRAY, + "border": CREWAI_ORANGE, + "highlight": { + "background": DARK_GRAY, + "border": CREWAI_ORANGE, + }, + } + font_color = WHITE + border_width = 3 + else: + color_config = { + "background": DARK_GRAY, + "border": DARK_GRAY, + "highlight": { + "background": DARK_GRAY, + "border": DARK_GRAY, + }, + } + font_color = WHITE + border_width = 2 + + title_parts: list[str] = [] + + type_badge_bg: str = ( + CREWAI_ORANGE if node_type in ["start", "router"] else DARK_GRAY + ) + title_parts.append(f""" +
+
{name}
+ {node_type} +
+ """) + + if metadata.get("condition_type"): + condition = metadata["condition_type"] + if condition == "AND": + condition_badge_bg = "rgba(255,90,80,0.12)" + condition_color = CREWAI_ORANGE + elif condition == "IF": + condition_badge_bg = "rgba(255,90,80,0.18)" + condition_color = CREWAI_ORANGE + else: + condition_badge_bg = "rgba(102,102,102,0.12)" + condition_color = GRAY + title_parts.append(f""" +
+
Condition
+ {condition} +
+ """) + + if metadata.get("trigger_methods"): + triggers = metadata["trigger_methods"] + triggers_items = "".join( + [ + f'
  • {t}
  • ' + for t in triggers + ] + ) + title_parts.append(f""" +
    +
    Triggers
    + +
    + """) + + if metadata.get("router_paths"): + paths = metadata["router_paths"] + paths_items = "".join( + [ + f'
  • {p}
  • ' + for p in paths + ] + ) + title_parts.append(f""" +
    +
    Router Paths
    + +
    + """) + + bg_color = color_config["background"] + border_color = color_config["border"] + + nodes_list.append( + { + "id": name, + "label": name, + "title": "".join(title_parts), + "shape": "custom", + "size": 30, + "nodeStyle": { + "name": name, + "bgColor": bg_color, + "borderColor": border_color, + "borderWidth": border_width, + "fontColor": font_color, + }, + "opacity": 1.0, + "glowSize": 0, + "glowColor": None, + } + ) + + execution_paths: int = calculate_execution_paths(dag) + + edges_list: list[dict[str, Any]] = [] + for edge in dag["edges"]: + edge_label: str = "" + edge_color: str = GRAY + edge_dashes: bool | list[int] = False + + if edge["is_router_path"]: + edge_color = CREWAI_ORANGE + edge_dashes = [15, 10] + elif edge["condition_type"] == "AND": + edge_label = "AND" + edge_color = CREWAI_ORANGE + elif edge["condition_type"] == "OR": + edge_label = "OR" + edge_color = GRAY + + edge_data: dict[str, Any] = { + "from": edge["source"], + "to": edge["target"], + "label": edge_label, + "arrows": "to", + "width": 2, + "selectionWidth": 0, + "color": { + "color": edge_color, + "highlight": edge_color, + }, + } + + if edge_dashes is not False: + edge_data["dashes"] = edge_dashes + + edges_list.append(edge_data) + + template_dir = Path(__file__).parent.parent / "assets" + env = Environment( + loader=FileSystemLoader(template_dir), + autoescape=select_autoescape(["html", "xml", "css", "js"]), + variable_start_string="'{{", + variable_end_string="}}'", + extensions=[CSSExtension, JSExtension], + ) + + temp_dir = Path(tempfile.mkdtemp(prefix="crewai_flow_")) + output_path = temp_dir / Path(filename).name + css_filename = output_path.stem + "_style.css" + css_output_path = temp_dir / css_filename + js_filename = output_path.stem + "_script.js" + js_output_path = temp_dir / js_filename + + css_file = template_dir / "style.css" + css_content = css_file.read_text(encoding="utf-8") + + css_content = css_content.replace("'{{ WHITE }}'", WHITE) + css_content = css_content.replace("'{{ DARK_GRAY }}'", DARK_GRAY) + css_content = css_content.replace("'{{ GRAY }}'", GRAY) + css_content = css_content.replace("'{{ CREWAI_ORANGE }}'", CREWAI_ORANGE) + + css_output_path.write_text(css_content, encoding="utf-8") + + js_file = template_dir / "interactive.js" + js_content = js_file.read_text(encoding="utf-8") + + dag_nodes_json = json.dumps(dag["nodes"]) + dag_full_json = json.dumps(dag) + + js_content = js_content.replace("{{ WHITE }}", WHITE) + js_content = js_content.replace("{{ DARK_GRAY }}", DARK_GRAY) + js_content = js_content.replace("{{ GRAY }}", GRAY) + js_content = js_content.replace("{{ CREWAI_ORANGE }}", CREWAI_ORANGE) + js_content = js_content.replace("'{{ nodeData }}'", dag_nodes_json) + js_content = js_content.replace("'{{ dagData }}'", dag_full_json) + js_content = js_content.replace("'{{ nodes_list_json }}'", json.dumps(nodes_list)) + js_content = js_content.replace("'{{ edges_list_json }}'", json.dumps(edges_list)) + + js_output_path.write_text(js_content, encoding="utf-8") + + template = env.get_template("interactive_flow.html.j2") + + html_content = template.render( + CREWAI_ORANGE=CREWAI_ORANGE, + DARK_GRAY=DARK_GRAY, + WHITE=WHITE, + GRAY=GRAY, + BG_DARK=BG_DARK, + BG_CARD=BG_CARD, + BORDER_SUBTLE=BORDER_SUBTLE, + TEXT_PRIMARY=TEXT_PRIMARY, + TEXT_SECONDARY=TEXT_SECONDARY, + nodes_list_json=json.dumps(nodes_list), + edges_list_json=json.dumps(edges_list), + dag_nodes_count=len(dag["nodes"]), + dag_edges_count=len(dag["edges"]), + execution_paths=execution_paths, + 
css_path=css_filename, + js_path=js_filename, + ) + + output_path.write_text(html_content, encoding="utf-8") + + if show: + webbrowser.open(f"file://{output_path.absolute()}") + + return str(output_path.absolute()) diff --git a/lib/crewai/src/crewai/flow/visualization/schema.py b/lib/crewai/src/crewai/flow/visualization/schema.py new file mode 100644 index 000000000..fe0de7fd1 --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/schema.py @@ -0,0 +1,104 @@ +"""OpenAPI schema conversion utilities for Flow methods.""" + +import inspect +from typing import Any, get_args, get_origin + + +def type_to_openapi_schema(type_hint: Any) -> dict[str, Any]: + """Convert Python type hint to OpenAPI schema. + + Args: + type_hint: Python type hint to convert. + + Returns: + OpenAPI schema dictionary. + """ + if type_hint is inspect.Parameter.empty: + return {} + + if type_hint is None or type_hint is type(None): + return {"type": "null"} + + if hasattr(type_hint, "__module__") and hasattr(type_hint, "__name__"): + if type_hint.__module__ == "typing" and type_hint.__name__ == "Any": + return {} + + type_str = str(type_hint) + if type_str == "typing.Any" or type_str == "": + return {} + + if isinstance(type_hint, str): + return {"type": type_hint} + + origin = get_origin(type_hint) + args = get_args(type_hint) + + if type_hint is str: + return {"type": "string"} + if type_hint is int: + return {"type": "integer"} + if type_hint is float: + return {"type": "number"} + if type_hint is bool: + return {"type": "boolean"} + if type_hint is dict or origin is dict: + if args and len(args) > 1: + return { + "type": "object", + "additionalProperties": type_to_openapi_schema(args[1]), + } + return {"type": "object"} + if type_hint is list or origin is list: + if args: + return {"type": "array", "items": type_to_openapi_schema(args[0])} + return {"type": "array"} + if hasattr(type_hint, "__name__"): + return {"type": "object", "className": type_hint.__name__} + + return {} + + +def extract_method_signature(method: Any, method_name: str) -> dict[str, Any]: + """Extract method signature as OpenAPI schema with documentation. + + Args: + method: Method to analyze. + method_name: Method name. + + Returns: + Dictionary with operationId, parameters, returns, summary, and description. 
+ """ + try: + sig = inspect.signature(method) + + parameters = {} + for param_name, param in sig.parameters.items(): + if param_name == "self": + continue + parameters[param_name] = type_to_openapi_schema(param.annotation) + + return_type = type_to_openapi_schema(sig.return_annotation) + + docstring = inspect.getdoc(method) + + result: dict[str, Any] = { + "operationId": method_name, + "parameters": parameters, + "returns": return_type, + } + + if docstring: + lines = docstring.strip().split("\n") + summary = lines[0].strip() + + if summary: + result["summary"] = summary + + if len(lines) > 1: + description = "\n".join(line.strip() for line in lines[1:]).strip() + if description: + result["description"] = description + + return result + except Exception: + return {"operationId": method_name, "parameters": {}, "returns": {}} diff --git a/lib/crewai/src/crewai/flow/visualization/types.py b/lib/crewai/src/crewai/flow/visualization/types.py new file mode 100644 index 000000000..6cb165bc4 --- /dev/null +++ b/lib/crewai/src/crewai/flow/visualization/types.py @@ -0,0 +1,40 @@ +"""Type definitions for Flow structure visualization.""" + +from typing import Any, TypedDict + + +class NodeMetadata(TypedDict, total=False): + """Metadata for a single node in the flow structure.""" + + type: str + is_router: bool + router_paths: list[str] + condition_type: str | None + trigger_methods: list[str] + trigger_condition: dict[str, Any] | None + method_signature: dict[str, Any] + source_code: str + source_lines: list[str] + source_start_line: int + source_file: str + class_signature: str + class_name: str + class_line_number: int + + +class StructureEdge(TypedDict): + """Represents a connection in the flow structure.""" + + source: str + target: str + condition_type: str | None + is_router_path: bool + + +class FlowStructure(TypedDict): + """Complete structure representation of a Flow.""" + + nodes: dict[str, NodeMetadata] + edges: list[StructureEdge] + start_methods: list[str] + router_methods: list[str] diff --git a/lib/crewai/src/crewai/flow/visualization_utils.py b/lib/crewai/src/crewai/flow/visualization_utils.py deleted file mode 100644 index ea276cf97..000000000 --- a/lib/crewai/src/crewai/flow/visualization_utils.py +++ /dev/null @@ -1,342 +0,0 @@ -""" -Utilities for creating visual representations of flow structures. - -This module provides functions for generating network visualizations of flows, -including node placement, edge creation, and visual styling. It handles the -conversion of flow structures into visual network graphs with appropriate -styling and layout. - -Example -------- ->>> flow = Flow() ->>> net = Network(directed=True) ->>> node_positions = compute_positions(flow, node_levels) ->>> add_nodes_to_network(net, flow, node_positions, node_styles) ->>> add_edges(net, flow, node_positions, colors) -""" - -import ast -import inspect -from typing import Any - -from crewai.flow.config import ( - CrewNodeStyle, - FlowColors, - MethodNodeStyle, - NodeStyles, - RouterNodeStyle, - StartNodeStyle, -) -from crewai.flow.utils import ( - build_ancestor_dict, - build_parent_children_dict, - get_child_index, - is_ancestor, -) -from crewai.utilities.printer import Printer - - -_printer = Printer() - - -def method_calls_crew(method: Any) -> bool: - """ - Check if the method contains a call to `.crew()`, `.kickoff()`, or `.kickoff_async()`. - - Parameters - ---------- - method : Any - The method to analyze for crew or agent execution calls. 
- - Returns - ------- - bool - True if the method calls .crew(), .kickoff(), or .kickoff_async(), False otherwise. - - Notes - ----- - Uses AST analysis to detect method calls, specifically looking for - attribute access of 'crew', 'kickoff', or 'kickoff_async'. - This includes both traditional Crew execution (.crew()) and Agent/LiteAgent - execution (.kickoff() or .kickoff_async()). - """ - try: - source = inspect.getsource(method) - source = inspect.cleandoc(source) - tree = ast.parse(source) - except Exception as e: - _printer.print(f"Could not parse method {method.__name__}: {e}", color="red") - return False - - class CrewCallVisitor(ast.NodeVisitor): - """AST visitor to detect .crew(), .kickoff(), or .kickoff_async() method calls.""" - - def __init__(self) -> None: - self.found = False - - def visit_Call(self, node: ast.Call) -> None: - if isinstance(node.func, ast.Attribute): - if node.func.attr in ("crew", "kickoff", "kickoff_async"): - self.found = True - self.generic_visit(node) - - visitor = CrewCallVisitor() - visitor.visit(tree) - return visitor.found - - -def add_nodes_to_network( - net: Any, - flow: Any, - node_positions: dict[str, tuple[float, float]], - node_styles: NodeStyles, -) -> None: - """ - Add nodes to the network visualization with appropriate styling. - - Parameters - ---------- - net : Any - The pyvis Network instance to add nodes to. - flow : Any - The flow instance containing method information. - node_positions : Dict[str, Tuple[float, float]] - Dictionary mapping node names to their (x, y) positions. - node_styles : Dict[str, Dict[str, Any]] - Dictionary containing style configurations for different node types. - - Notes - ----- - Node types include: - - Start methods - - Router methods - - Crew methods - - Regular methods - """ - - def human_friendly_label(method_name: str) -> str: - return method_name.replace("_", " ").title() - - node_style: ( - StartNodeStyle | RouterNodeStyle | CrewNodeStyle | MethodNodeStyle | None - ) - for method_name, (x, y) in node_positions.items(): - method = flow._methods.get(method_name) - if hasattr(method, "__is_start_method__"): - node_style = node_styles["start"] - elif hasattr(method, "__is_router__"): - node_style = node_styles["router"] - elif method_calls_crew(method): - node_style = node_styles["crew"] - else: - node_style = node_styles["method"] - - node_style = node_style.copy() - label = human_friendly_label(method_name) - - node_style.update( - { - "label": label, - "shape": "box", - "font": { - "multi": "html", - "color": node_style.get("font", {}).get("color", "#FFFFFF"), - }, - } - ) - - net.add_node( - method_name, - x=x, - y=y, - fixed=True, - physics=False, - **node_style, - ) - - -def compute_positions( - flow: Any, - node_levels: dict[str, int], - y_spacing: float = 150, - x_spacing: float = 300, -) -> dict[str, tuple[float, float]]: - """ - Compute the (x, y) positions for each node in the flow graph. - - Parameters - ---------- - flow : Any - The flow instance to compute positions for. - node_levels : Dict[str, int] - Dictionary mapping node names to their hierarchical levels. - y_spacing : float, optional - Vertical spacing between levels, by default 150. - x_spacing : float, optional - Horizontal spacing between nodes, by default 300. - - Returns - ------- - Dict[str, Tuple[float, float]] - Dictionary mapping node names to their (x, y) coordinates. 
- """ - level_nodes: dict[int, list[str]] = {} - node_positions: dict[str, tuple[float, float]] = {} - - for method_name, level in node_levels.items(): - level_nodes.setdefault(level, []).append(method_name) - - for level, nodes in level_nodes.items(): - x_offset = -(len(nodes) - 1) * x_spacing / 2 # Center nodes horizontally - for i, method_name in enumerate(nodes): - x = x_offset + i * x_spacing - y = level * y_spacing - node_positions[method_name] = (x, y) - - return node_positions - - -def add_edges( - net: Any, - flow: Any, - node_positions: dict[str, tuple[float, float]], - colors: FlowColors, -) -> None: - edge_smooth: dict[str, str | float] = {"type": "continuous"} # Default value - """ - Add edges to the network visualization with appropriate styling. - - Parameters - ---------- - net : Any - The pyvis Network instance to add edges to. - flow : Any - The flow instance containing edge information. - node_positions : Dict[str, Tuple[float, float]] - Dictionary mapping node names to their positions. - colors : Dict[str, str] - Dictionary mapping edge types to their colors. - - Notes - ----- - - Handles both normal listener edges and router edges - - Applies appropriate styling (color, dashes) based on edge type - - Adds curvature to edges when needed (cycles or multiple children) - """ - ancestors = build_ancestor_dict(flow) - parent_children = build_parent_children_dict(flow) - - # Edges for normal listeners - for method_name in flow._listeners: - condition_type, trigger_methods = flow._listeners[method_name] - is_and_condition = condition_type == "AND" - - for trigger in trigger_methods: - # Check if nodes exist before adding edges - if trigger in node_positions and method_name in node_positions: - is_router_edge = any( - trigger in paths for paths in flow._router_paths.values() - ) - edge_color = colors["router_edge"] if is_router_edge else colors["edge"] - - is_cycle_edge = is_ancestor(trigger, method_name, ancestors) - parent_has_multiple_children = len(parent_children.get(trigger, [])) > 1 - needs_curvature = is_cycle_edge or parent_has_multiple_children - - if needs_curvature: - source_pos = node_positions.get(trigger) - target_pos = node_positions.get(method_name) - - if source_pos and target_pos: - dx = target_pos[0] - source_pos[0] - smooth_type = "curvedCCW" if dx <= 0 else "curvedCW" - index = get_child_index(trigger, method_name, parent_children) - edge_smooth = { - "type": smooth_type, - "roundness": 0.2 + (0.1 * index), - } - else: - edge_smooth = {"type": "cubicBezier"} - else: - edge_smooth.update({"type": "continuous"}) - - edge_style = { - "color": edge_color, - "width": 2, - "arrows": "to", - "dashes": True if is_router_edge or is_and_condition else False, - "smooth": edge_smooth, - } - - net.add_edge(trigger, method_name, **edge_style) - else: - # Nodes not found in node_positions. Check if it's a known router outcome and a known method. - is_router_edge = any( - trigger in paths for paths in flow._router_paths.values() - ) - # Check if method_name is a known method - method_known = method_name in flow._methods - - # If it's a known router edge and the method is known, don't warn. - # This means the path is legitimate, just not reflected as nodes here. - if not (is_router_edge and method_known): - _printer.print( - f"Warning: No node found for '{trigger}' or '{method_name}'. 
Skipping edge.", - color="yellow", - ) - - # Edges for router return paths - for router_method_name, paths in flow._router_paths.items(): - for path in paths: - for listener_name, ( - _condition_type, - trigger_methods, - ) in flow._listeners.items(): - if path in trigger_methods: - if ( - router_method_name in node_positions - and listener_name in node_positions - ): - is_cycle_edge = is_ancestor( - router_method_name, listener_name, ancestors - ) - parent_has_multiple_children = ( - len(parent_children.get(router_method_name, [])) > 1 - ) - needs_curvature = is_cycle_edge or parent_has_multiple_children - - if needs_curvature: - source_pos = node_positions.get(router_method_name) - target_pos = node_positions.get(listener_name) - - if source_pos and target_pos: - dx = target_pos[0] - source_pos[0] - smooth_type = "curvedCCW" if dx <= 0 else "curvedCW" - index = get_child_index( - router_method_name, listener_name, parent_children - ) - edge_smooth = { - "type": smooth_type, - "roundness": 0.2 + (0.1 * index), - } - else: - edge_smooth = {"type": "cubicBezier"} - else: - edge_smooth.update({"type": "continuous"}) - - edge_style = { - "color": colors["router_edge"], - "width": 2, - "arrows": "to", - "dashes": True, - "smooth": edge_smooth, - } - net.add_edge(router_method_name, listener_name, **edge_style) - else: - # Same check here: known router edge and known method? - method_known = listener_name in flow._methods - if not method_known: - _printer.print( - f"Warning: No node found for '{router_method_name}' or '{listener_name}'. Skipping edge.", - color="yellow", - ) diff --git a/lib/crewai/tests/test_flow.py b/lib/crewai/tests/test_flow.py index 12a6fb344..c6a994d39 100644 --- a/lib/crewai/tests/test_flow.py +++ b/lib/crewai/tests/test_flow.py @@ -850,31 +850,6 @@ def test_flow_plotting(): assert isinstance(received_events[0].timestamp, datetime) -def test_method_calls_crew_detection(): - """Test that method_calls_crew() detects .crew(), .kickoff(), and .kickoff_async() calls.""" - from crewai.flow.visualization_utils import method_calls_crew - from crewai import Agent - - # Test with a real Flow that uses agent.kickoff() - class FlowWithAgentKickoff(Flow): - @start() - def run_agent(self): - agent = Agent(role="test", goal="test", backstory="test") - return agent.kickoff("query") - - flow = FlowWithAgentKickoff() - assert method_calls_crew(flow.run_agent) is True - - # Test with a Flow that has no crew/agent calls - class FlowWithoutCrewCalls(Flow): - @start() - def simple_method(self): - return "Just a regular method" - - flow2 = FlowWithoutCrewCalls() - assert method_calls_crew(flow2.simple_method) is False - - def test_multiple_routers_from_same_trigger(): """Test that multiple routers triggered by the same method all activate their listeners.""" execution_order = [] @@ -1058,3 +1033,354 @@ def test_nested_and_or_conditions(): # method_8 should execute after method_7 assert execution_order.index("method_8") > execution_order.index("method_7") + + +def test_diamond_dependency_pattern(): + """Test diamond pattern where two parallel paths converge at a final step.""" + execution_order = [] + + class DiamondFlow(Flow): + @start() + def start(self): + execution_order.append("start") + return "started" + + @listen(start) + def path_a(self): + execution_order.append("path_a") + return "a_done" + + @listen(start) + def path_b(self): + execution_order.append("path_b") + return "b_done" + + @listen(and_(path_a, path_b)) + def converge(self): + execution_order.append("converge") + return 
"converged" + + flow = DiamondFlow() + flow.kickoff() + + # Start should execute first + assert execution_order[0] == "start" + + # Both paths should execute after start + assert "path_a" in execution_order + assert "path_b" in execution_order + assert execution_order.index("path_a") > execution_order.index("start") + assert execution_order.index("path_b") > execution_order.index("start") + + # Converge should be last and after both paths + assert execution_order[-1] == "converge" + assert execution_order.index("converge") > execution_order.index("path_a") + assert execution_order.index("converge") > execution_order.index("path_b") + + +def test_router_cascade_chain(): + """Test a chain of routers where each router triggers the next.""" + execution_order = [] + + class RouterCascadeFlow(Flow): + def __init__(self): + super().__init__() + self.state["level"] = 1 + + @start() + def begin(self): + execution_order.append("begin") + return "started" + + @router(begin) + def router_level_1(self): + execution_order.append("router_level_1") + return "level_1_path" + + @listen("level_1_path") + def process_level_1(self): + execution_order.append("process_level_1") + self.state["level"] = 2 + return "level_1_done" + + @router(process_level_1) + def router_level_2(self): + execution_order.append("router_level_2") + return "level_2_path" + + @listen("level_2_path") + def process_level_2(self): + execution_order.append("process_level_2") + self.state["level"] = 3 + return "level_2_done" + + @router(process_level_2) + def router_level_3(self): + execution_order.append("router_level_3") + return "final_path" + + @listen("final_path") + def finalize(self): + execution_order.append("finalize") + return "complete" + + flow = RouterCascadeFlow() + flow.kickoff() + + expected_order = [ + "begin", + "router_level_1", + "process_level_1", + "router_level_2", + "process_level_2", + "router_level_3", + "finalize", + ] + + assert execution_order == expected_order + assert flow.state["level"] == 3 + + +def test_complex_and_or_branching(): + """Test complex branching with multiple AND and OR conditions.""" + execution_order = [] + + class ComplexBranchingFlow(Flow): + @start() + def init(self): + execution_order.append("init") + + @listen(init) + def branch_1a(self): + execution_order.append("branch_1a") + + @listen(init) + def branch_1b(self): + execution_order.append("branch_1b") + + @listen(init) + def branch_1c(self): + execution_order.append("branch_1c") + + # Requires 1a AND 1b (ignoring 1c) + @listen(and_(branch_1a, branch_1b)) + def branch_2a(self): + execution_order.append("branch_2a") + + # Requires any of 1a, 1b, or 1c + @listen(or_(branch_1a, branch_1b, branch_1c)) + def branch_2b(self): + execution_order.append("branch_2b") + + # Final step requires 2a AND 2b + @listen(and_(branch_2a, branch_2b)) + def final(self): + execution_order.append("final") + + flow = ComplexBranchingFlow() + flow.kickoff() + + # Verify all branches executed + assert "init" in execution_order + assert "branch_1a" in execution_order + assert "branch_1b" in execution_order + assert "branch_1c" in execution_order + assert "branch_2a" in execution_order + assert "branch_2b" in execution_order + assert "final" in execution_order + + # Verify order constraints + assert execution_order.index("branch_2a") > execution_order.index("branch_1a") + assert execution_order.index("branch_2a") > execution_order.index("branch_1b") + + # branch_2b should trigger after at least one of 1a, 1b, or 1c + min_branch_1_index = min( + 
execution_order.index("branch_1a"), + execution_order.index("branch_1b"), + execution_order.index("branch_1c"), + ) + assert execution_order.index("branch_2b") > min_branch_1_index + + # Final should be last and after both 2a and 2b + assert execution_order[-1] == "final" + assert execution_order.index("final") > execution_order.index("branch_2a") + assert execution_order.index("final") > execution_order.index("branch_2b") + + +def test_conditional_router_paths_exclusivity(): + """Test that only the returned router path activates, not all paths.""" + execution_order = [] + + class ConditionalRouterFlow(Flow): + def __init__(self): + super().__init__() + self.state["condition"] = "take_path_b" + + @start() + def begin(self): + execution_order.append("begin") + + @router(begin) + def decision_point(self): + execution_order.append("decision_point") + if self.state["condition"] == "take_path_a": + return "path_a" + elif self.state["condition"] == "take_path_b": + return "path_b" + else: + return "path_c" + + @listen("path_a") + def handle_path_a(self): + execution_order.append("handle_path_a") + + @listen("path_b") + def handle_path_b(self): + execution_order.append("handle_path_b") + + @listen("path_c") + def handle_path_c(self): + execution_order.append("handle_path_c") + + flow = ConditionalRouterFlow() + flow.kickoff() + + # Should only execute path_b, not path_a or path_c + assert "begin" in execution_order + assert "decision_point" in execution_order + assert "handle_path_b" in execution_order + assert "handle_path_a" not in execution_order + assert "handle_path_c" not in execution_order + + +def test_state_consistency_across_parallel_branches(): + """Test that state remains consistent when branches execute sequentially. + + Note: Branches triggered by the same parent execute sequentially, not in parallel. + This ensures predictable state mutations and prevents race conditions. 
+ """ + execution_order = [] + + class StateConsistencyFlow(Flow): + def __init__(self): + super().__init__() + self.state["counter"] = 0 + self.state["branch_a_value"] = None + self.state["branch_b_value"] = None + + @start() + def init(self): + execution_order.append("init") + self.state["counter"] = 10 + + @listen(init) + def branch_a(self): + execution_order.append("branch_a") + # Read counter value + self.state["branch_a_value"] = self.state["counter"] + self.state["counter"] += 1 + + @listen(init) + def branch_b(self): + execution_order.append("branch_b") + # Read counter value + self.state["branch_b_value"] = self.state["counter"] + self.state["counter"] += 5 + + @listen(and_(branch_a, branch_b)) + def verify_state(self): + execution_order.append("verify_state") + + flow = StateConsistencyFlow() + flow.kickoff() + + # Branches execute sequentially, so branch_a runs first, then branch_b + assert flow.state["branch_a_value"] == 10 # Sees initial value + assert flow.state["branch_b_value"] == 11 # Sees value after branch_a increment + + # Final counter should reflect both increments sequentially + assert flow.state["counter"] == 16 # 10 + 1 + 5 + + +def test_deeply_nested_conditions(): + """Test deeply nested AND/OR conditions to ensure proper evaluation.""" + execution_order = [] + + class DeeplyNestedFlow(Flow): + @start() + def a(self): + execution_order.append("a") + + @start() + def b(self): + execution_order.append("b") + + @start() + def c(self): + execution_order.append("c") + + @start() + def d(self): + execution_order.append("d") + + # Nested: (a AND b) OR (c AND d) + @listen(or_(and_(a, b), and_(c, d))) + def result(self): + execution_order.append("result") + + flow = DeeplyNestedFlow() + flow.kickoff() + + # All start methods should execute + assert "a" in execution_order + assert "b" in execution_order + assert "c" in execution_order + assert "d" in execution_order + + # Result should execute after all starts + assert "result" in execution_order + assert execution_order.index("result") > execution_order.index("a") + assert execution_order.index("result") > execution_order.index("b") + assert execution_order.index("result") > execution_order.index("c") + assert execution_order.index("result") > execution_order.index("d") + + +def test_mixed_sync_async_execution_order(): + """Test that execution order is preserved with mixed sync/async methods.""" + execution_order = [] + + class MixedSyncAsyncFlow(Flow): + @start() + def sync_start(self): + execution_order.append("sync_start") + + @listen(sync_start) + async def async_step_1(self): + execution_order.append("async_step_1") + await asyncio.sleep(0.01) + + @listen(async_step_1) + def sync_step_2(self): + execution_order.append("sync_step_2") + + @listen(sync_step_2) + async def async_step_3(self): + execution_order.append("async_step_3") + await asyncio.sleep(0.01) + + @listen(async_step_3) + def sync_final(self): + execution_order.append("sync_final") + + flow = MixedSyncAsyncFlow() + asyncio.run(flow.kickoff_async()) + + expected_order = [ + "sync_start", + "async_step_1", + "sync_step_2", + "async_step_3", + "sync_final", + ] + + assert execution_order == expected_order diff --git a/lib/crewai/tests/test_flow_visualization.py b/lib/crewai/tests/test_flow_visualization.py new file mode 100644 index 000000000..1fd78340e --- /dev/null +++ b/lib/crewai/tests/test_flow_visualization.py @@ -0,0 +1,497 @@ +"""Tests for flow visualization and structure building.""" + +import json +import os +import tempfile +from pathlib import 
Path + +import pytest + +from crewai.flow.flow import Flow, and_, listen, or_, router, start +from crewai.flow.visualization import ( + build_flow_structure, + print_structure_summary, + structure_to_dict, + visualize_flow_structure, +) + + +class SimpleFlow(Flow): + """Simple flow for testing basic visualization.""" + + @start() + def begin(self): + return "started" + + @listen(begin) + def process(self): + return "processed" + + +class RouterFlow(Flow): + """Flow with router for testing router visualization.""" + + @start() + def init(self): + return "initialized" + + @router(init) + def decide(self): + if hasattr(self, "state") and self.state.get("path") == "b": + return "path_b" + return "path_a" + + @listen("path_a") + def handle_a(self): + return "handled_a" + + @listen("path_b") + def handle_b(self): + return "handled_b" + + +class ComplexFlow(Flow): + """Complex flow with AND/OR conditions for testing.""" + + @start() + def start_a(self): + return "a" + + @start() + def start_b(self): + return "b" + + @listen(and_(start_a, start_b)) + def converge_and(self): + return "and_done" + + @listen(or_(start_a, start_b)) + def converge_or(self): + return "or_done" + + @router(converge_and) + def router_decision(self): + return "final_path" + + @listen("final_path") + def finalize(self): + return "complete" + + +def test_build_flow_structure_simple(): + """Test building structure for a simple sequential flow.""" + flow = SimpleFlow() + structure = build_flow_structure(flow) + + assert structure is not None + assert len(structure["nodes"]) == 2 + assert len(structure["edges"]) == 1 + + node_names = set(structure["nodes"].keys()) + assert "begin" in node_names + assert "process" in node_names + + assert len(structure["start_methods"]) == 1 + assert "begin" in structure["start_methods"] + + edge = structure["edges"][0] + assert edge["source"] == "begin" + assert edge["target"] == "process" + assert edge["condition_type"] == "OR" + + +def test_build_flow_structure_with_router(): + """Test building structure for a flow with router.""" + flow = RouterFlow() + structure = build_flow_structure(flow) + + assert structure is not None + assert len(structure["nodes"]) == 4 + + assert len(structure["router_methods"]) == 1 + assert "decide" in structure["router_methods"] + + router_node = structure["nodes"]["decide"] + assert router_node["type"] == "router" + + if "router_paths" in router_node: + assert len(router_node["router_paths"]) >= 1 + assert any("path" in path for path in router_node["router_paths"]) + + router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]] + assert len(router_edges) >= 1 + + +def test_build_flow_structure_with_and_or_conditions(): + """Test building structure for a flow with AND/OR conditions.""" + flow = ComplexFlow() + structure = build_flow_structure(flow) + + assert structure is not None + + and_edges = [ + edge + for edge in structure["edges"] + if edge["target"] == "converge_and" and edge["condition_type"] == "AND" + ] + assert len(and_edges) == 2 + + or_edges = [ + edge + for edge in structure["edges"] + if edge["target"] == "converge_or" and edge["condition_type"] == "OR" + ] + assert len(or_edges) == 2 + + +def test_structure_to_dict(): + """Test converting flow structure to dictionary format.""" + flow = SimpleFlow() + structure = build_flow_structure(flow) + dag_dict = structure_to_dict(structure) + + assert "nodes" in dag_dict + assert "edges" in dag_dict + assert "start_methods" in dag_dict + assert "router_methods" in dag_dict + + assert 
"begin" in dag_dict["nodes"] + assert "process" in dag_dict["nodes"] + + begin_node = dag_dict["nodes"]["begin"] + assert begin_node["type"] == "start" + assert "method_signature" in begin_node + assert "source_code" in begin_node + + assert len(dag_dict["edges"]) == 1 + edge = dag_dict["edges"][0] + assert "source" in edge + assert "target" in edge + assert "condition_type" in edge + assert "is_router_path" in edge + + +def test_structure_to_dict_with_router(): + """Test dictionary conversion for flow with router.""" + flow = RouterFlow() + structure = build_flow_structure(flow) + dag_dict = structure_to_dict(structure) + + decide_node = dag_dict["nodes"]["decide"] + assert decide_node["type"] == "router" + assert decide_node["is_router"] is True + + if "router_paths" in decide_node: + assert len(decide_node["router_paths"]) >= 1 + + router_edges = [edge for edge in dag_dict["edges"] if edge["is_router_path"]] + assert len(router_edges) >= 1 + + +def test_structure_to_dict_with_complex_conditions(): + """Test dictionary conversion for flow with complex conditions.""" + flow = ComplexFlow() + structure = build_flow_structure(flow) + dag_dict = structure_to_dict(structure) + + converge_and_node = dag_dict["nodes"]["converge_and"] + assert converge_and_node["condition_type"] == "AND" + assert "trigger_condition" in converge_and_node + assert converge_and_node["trigger_condition"]["type"] == "AND" + + converge_or_node = dag_dict["nodes"]["converge_or"] + assert converge_or_node["condition_type"] == "OR" + + +def test_visualize_flow_structure_creates_html(): + """Test that visualization generates valid HTML file.""" + flow = SimpleFlow() + structure = build_flow_structure(flow) + + html_file = visualize_flow_structure(structure, "test_flow.html", show=False) + + assert os.path.exists(html_file) + + with open(html_file, "r", encoding="utf-8") as f: + html_content = f.read() + + assert "" in html_content + assert " 0 + assert "body" in css_content or ":root" in css_content + + js_content = js_file.read_text(encoding="utf-8") + assert len(js_content) > 0 + assert "var nodes" in js_content or "const nodes" in js_content + + +def test_visualize_flow_structure_json_data(): + """Test that visualization includes valid JSON data in JS file.""" + flow = RouterFlow() + structure = build_flow_structure(flow) + + html_file = visualize_flow_structure(structure, "test_flow.html", show=False) + html_path = Path(html_file) + + js_file = html_path.parent / f"{html_path.stem}_script.js" + + js_content = js_file.read_text(encoding="utf-8") + + assert "init" in js_content + assert "decide" in js_content + assert "handle_a" in js_content + assert "handle_b" in js_content + + assert "router" in js_content.lower() + assert "path_a" in js_content + assert "path_b" in js_content + + +def test_print_structure_summary(): + """Test printing flow structure summary.""" + flow = ComplexFlow() + structure = build_flow_structure(flow) + + output = print_structure_summary(structure) + + assert "Total nodes:" in output + assert "Total edges:" in output + assert "Start methods:" in output + assert "Router methods:" in output + + assert "start_a" in output + assert "start_b" in output + + +def test_node_metadata_includes_source_info(): + """Test that nodes include source code and line number information.""" + flow = SimpleFlow() + structure = build_flow_structure(flow) + + for node_name, node_metadata in structure["nodes"].items(): + assert node_metadata["source_code"] is not None + assert len(node_metadata["source_code"]) > 0 + 
assert node_metadata["source_start_line"] is not None + assert node_metadata["source_start_line"] > 0 + assert node_metadata["source_file"] is not None + assert node_metadata["source_file"].endswith(".py") + + +def test_node_metadata_includes_method_signature(): + """Test that nodes include method signature information.""" + flow = SimpleFlow() + structure = build_flow_structure(flow) + + begin_node = structure["nodes"]["begin"] + assert begin_node["method_signature"] is not None + assert "operationId" in begin_node["method_signature"] + assert begin_node["method_signature"]["operationId"] == "begin" + assert "parameters" in begin_node["method_signature"] + assert "returns" in begin_node["method_signature"] + + +def test_router_node_has_correct_metadata(): + """Test that router nodes have correct type and paths.""" + flow = RouterFlow() + structure = build_flow_structure(flow) + + router_node = structure["nodes"]["decide"] + assert router_node["type"] == "router" + assert router_node["is_router"] is True + assert router_node["router_paths"] is not None + assert len(router_node["router_paths"]) == 2 + assert "path_a" in router_node["router_paths"] + assert "path_b" in router_node["router_paths"] + + +def test_listen_node_has_trigger_methods(): + """Test that listen nodes include trigger method information.""" + flow = RouterFlow() + structure = build_flow_structure(flow) + + handle_a_node = structure["nodes"]["handle_a"] + assert handle_a_node["trigger_methods"] is not None + assert "path_a" in handle_a_node["trigger_methods"] + + +def test_and_condition_node_metadata(): + """Test that AND condition nodes have correct metadata.""" + flow = ComplexFlow() + structure = build_flow_structure(flow) + + converge_and_node = structure["nodes"]["converge_and"] + assert converge_and_node["condition_type"] == "AND" + assert converge_and_node["trigger_condition"] is not None + assert converge_and_node["trigger_condition"]["type"] == "AND" + assert len(converge_and_node["trigger_condition"]["conditions"]) == 2 + + +def test_visualization_handles_special_characters(): + """Test that visualization properly handles special characters in method names.""" + + class SpecialCharFlow(Flow): + @start() + def method_with_underscore(self): + return "test" + + @listen(method_with_underscore) + def another_method_123(self): + return "done" + + flow = SpecialCharFlow() + structure = build_flow_structure(flow) + + assert len(structure["nodes"]) == 2 + + dag_dict = structure_to_dict(structure) + json_str = json.dumps(dag_dict) + assert json_str is not None + assert "method_with_underscore" in json_str + assert "another_method_123" in json_str + + +def test_empty_flow_structure(): + """Test building structure for a flow with no methods.""" + + class EmptyFlow(Flow): + pass + + flow = EmptyFlow() + + structure = build_flow_structure(flow) + assert structure is not None + assert len(structure["nodes"]) == 0 + assert len(structure["edges"]) == 0 + assert len(structure["start_methods"]) == 0 + + +def test_topological_path_counting(): + """Test that topological path counting is accurate.""" + flow = ComplexFlow() + structure = build_flow_structure(flow) + dag_dict = structure_to_dict(structure) + + assert len(structure["nodes"]) > 0 + assert len(structure["edges"]) > 0 + + +def test_class_signature_metadata(): + """Test that nodes include class signature information.""" + flow = SimpleFlow() + structure = build_flow_structure(flow) + + for node_name, node_metadata in structure["nodes"].items(): + assert 
node_metadata["class_name"] is not None + assert node_metadata["class_name"] == "SimpleFlow" + assert node_metadata["class_signature"] is not None + assert "SimpleFlow" in node_metadata["class_signature"] + + +def test_visualization_plot_method(): + """Test that flow.plot() method works.""" + flow = SimpleFlow() + + html_file = flow.plot("test_plot.html", show=False) + + assert os.path.exists(html_file) + + +def test_router_paths_to_string_conditions(): + """Test that router paths correctly connect to listeners with string conditions.""" + + class RouterToStringFlow(Flow): + @start() + def init(self): + return "initialized" + + @router(init) + def decide(self): + if hasattr(self, "state") and self.state.get("path") == "b": + return "path_b" + return "path_a" + + @listen(or_("path_a", "path_b")) + def handle_either(self): + return "handled" + + @listen("path_b") + def handle_b_only(self): + return "handled_b" + + flow = RouterToStringFlow() + structure = build_flow_structure(flow) + + decide_node = structure["nodes"]["decide"] + assert "path_a" in decide_node["router_paths"] + assert "path_b" in decide_node["router_paths"] + + router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]] + + assert len(router_edges) == 3 + + edges_to_handle_either = [ + edge for edge in router_edges if edge["target"] == "handle_either" + ] + assert len(edges_to_handle_either) == 2 + + edges_to_handle_b_only = [ + edge for edge in router_edges if edge["target"] == "handle_b_only" + ] + assert len(edges_to_handle_b_only) == 1 + + +def test_router_paths_not_in_and_conditions(): + """Test that router paths don't create edges to AND-nested conditions.""" + + class RouterAndConditionFlow(Flow): + @start() + def init(self): + return "initialized" + + @router(init) + def decide(self): + return "path_a" + + @listen("path_a") + def step_1(self): + return "step_1_done" + + @listen(and_("path_a", step_1)) + def step_2_and(self): + return "step_2_done" + + @listen(or_(and_("path_a", step_1), "path_a")) + def step_3_or(self): + return "step_3_done" + + flow = RouterAndConditionFlow() + structure = build_flow_structure(flow) + + router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]] + + targets = [edge["target"] for edge in router_edges] + + assert "step_1" in targets + assert "step_3_or" in targets + assert "step_2_and" not in targets \ No newline at end of file diff --git a/uv.lock b/uv.lock index 21a3db9a3..f8c14ad0f 100644 --- a/uv.lock +++ b/uv.lock @@ -295,15 +295,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c9/7f/09065fd9e27da0eda08b4d6897f1c13535066174cc023af248fc2a8d5e5a/asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67", size = 105045, upload-time = "2022-03-15T14:46:51.055Z" }, ] -[[package]] -name = "asttokens" -version = "3.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, -] - [[package]] 
name = "async-timeout" version = "5.0.1" @@ -1077,7 +1068,6 @@ dependencies = [ { name = "pydantic-settings" }, { name = "pyjwt" }, { name = "python-dotenv" }, - { name = "pyvis" }, { name = "regex" }, { name = "tokenizers" }, { name = "tomli" }, @@ -1170,7 +1160,6 @@ requires-dist = [ { name = "pydantic-settings", specifier = ">=2.10.1" }, { name = "pyjwt", specifier = ">=2.9.0" }, { name = "python-dotenv", specifier = ">=1.1.1" }, - { name = "pyvis", specifier = ">=0.3.2" }, { name = "qdrant-client", extras = ["fastembed"], marker = "extra == 'qdrant'", specifier = ">=1.14.3" }, { name = "regex", specifier = ">=2024.9.11" }, { name = "tiktoken", marker = "extra == 'embeddings'", specifier = "~=0.8.0" }, @@ -1810,15 +1799,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" }, ] -[[package]] -name = "executing" -version = "2.2.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/28/c14e053b6762b1044f34a13aab6859bbf40456d37d23aa286ac24cfd9a5d/executing-2.2.1.tar.gz", hash = "sha256:3632cc370565f6648cc328b32435bd120a1e4ebb20c77e3fdde9a13cd1e533c4", size = 1129488, upload-time = "2025-09-01T09:48:10.866Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c1/ea/53f2148663b321f21b5a606bd5f191517cf40b7072c0497d3c92c4a13b1e/executing-2.2.1-py2.py3-none-any.whl", hash = "sha256:760643d3452b4d777d295bb167ccc74c64a81df23fb5e08eff250c425a4b2017", size = 28317, upload-time = "2025-09-01T09:48:08.5Z" }, -] - [[package]] name = "faker" version = "37.11.0" @@ -2887,90 +2867,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, ] -[[package]] -name = "ipython" -version = "8.37.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.11' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'", - "python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'", - "(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", - "python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'", - "python_full_version < '3.11' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'", - "(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", -] -dependencies = [ - { name = "colorama", marker = "python_full_version < '3.11' and sys_platform == 'win32'" }, - { name = "decorator", marker = "python_full_version < '3.11'" }, - { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, 
- { name = "jedi", marker = "python_full_version < '3.11'" }, - { name = "matplotlib-inline", marker = "python_full_version < '3.11'" }, - { name = "pexpect", marker = "python_full_version < '3.11' and sys_platform != 'emscripten' and sys_platform != 'win32'" }, - { name = "prompt-toolkit", marker = "python_full_version < '3.11'" }, - { name = "pygments", marker = "python_full_version < '3.11'" }, - { name = "stack-data", marker = "python_full_version < '3.11'" }, - { name = "traitlets", marker = "python_full_version < '3.11'" }, - { name = "typing-extensions", marker = "python_full_version < '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/85/31/10ac88f3357fc276dc8a64e8880c82e80e7459326ae1d0a211b40abf6665/ipython-8.37.0.tar.gz", hash = "sha256:ca815841e1a41a1e6b73a0b08f3038af9b2252564d01fc405356d34033012216", size = 5606088, upload-time = "2025-05-31T16:39:09.613Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/91/d0/274fbf7b0b12643cbbc001ce13e6a5b1607ac4929d1b11c72460152c9fc3/ipython-8.37.0-py3-none-any.whl", hash = "sha256:ed87326596b878932dbcb171e3e698845434d8c61b8d8cd474bf663041a9dcf2", size = 831864, upload-time = "2025-05-31T16:39:06.38Z" }, -] - -[[package]] -name = "ipython" -version = "9.6.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.13' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", - "python_full_version == '3.12.*' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'", - "python_full_version == '3.12.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'", - "(python_full_version == '3.12.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.12.*' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", - "python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and sys_platform == 'darwin'", - "python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'", - "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation != 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.11.*' and platform_python_implementation != 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", - "python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'", - "python_full_version >= '3.13' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'", - "(python_full_version >= '3.13' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version >= '3.13' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", - "python_full_version == '3.12.*' and platform_python_implementation == 
'PyPy' and sys_platform == 'darwin'", - "python_full_version == '3.12.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'", - "(python_full_version == '3.12.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.12.*' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", - "python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'", - "python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux'", - "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_python_implementation == 'PyPy' and sys_platform == 'linux') or (python_full_version == '3.11.*' and platform_python_implementation == 'PyPy' and sys_platform != 'darwin' and sys_platform != 'linux')", -] -dependencies = [ - { name = "colorama", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, - { name = "decorator", marker = "python_full_version >= '3.11'" }, - { name = "ipython-pygments-lexers", marker = "python_full_version >= '3.11'" }, - { name = "jedi", marker = "python_full_version >= '3.11'" }, - { name = "matplotlib-inline", marker = "python_full_version >= '3.11'" }, - { name = "pexpect", marker = "python_full_version >= '3.11' and sys_platform != 'emscripten' and sys_platform != 'win32'" }, - { name = "prompt-toolkit", marker = "python_full_version >= '3.11'" }, - { name = "pygments", marker = "python_full_version >= '3.11'" }, - { name = "stack-data", marker = "python_full_version >= '3.11'" }, - { name = "traitlets", marker = "python_full_version >= '3.11'" }, - { name = "typing-extensions", marker = "python_full_version == '3.11.*'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/2a/34/29b18c62e39ee2f7a6a3bba7efd952729d8aadd45ca17efc34453b717665/ipython-9.6.0.tar.gz", hash = "sha256:5603d6d5d356378be5043e69441a072b50a5b33b4503428c77b04cb8ce7bc731", size = 4396932, upload-time = "2025-09-29T10:55:53.948Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/48/c5/d5e07995077e48220269c28a221e168c91123ad5ceee44d548f54a057fc0/ipython-9.6.0-py3-none-any.whl", hash = "sha256:5f77efafc886d2f023442479b8149e7d86547ad0a979e9da9f045d252f648196", size = 616170, upload-time = "2025-09-29T10:55:47.676Z" }, -] - -[[package]] -name = "ipython-pygments-lexers" -version = "1.1.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "pygments", marker = "python_full_version >= '3.11'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, -] - [[package]] name = "isodate" version = "0.7.2" @@ -2980,18 +2876,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl", 
hash = "sha256:28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15", size = 22320, upload-time = "2024-10-08T23:04:09.501Z" }, ] -[[package]] -name = "jedi" -version = "0.19.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "parso" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, -] - [[package]] name = "jinja2" version = "3.1.6" @@ -3123,15 +3007,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" }, ] -[[package]] -name = "jsonpickle" -version = "4.1.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e4/a6/d07afcfdef402900229bcca795f80506b207af13a838d4d99ad45abf530c/jsonpickle-4.1.1.tar.gz", hash = "sha256:f86e18f13e2b96c1c1eede0b7b90095bbb61d99fedc14813c44dc2f361dbbae1", size = 316885, upload-time = "2025-06-02T20:36:11.57Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c1/73/04df8a6fa66d43a9fd45c30f283cc4afff17da671886e451d52af60bdc7e/jsonpickle-4.1.1-py3-none-any.whl", hash = "sha256:bb141da6057898aa2438ff268362b126826c812a1721e31cf08a6e142910dc91", size = 47125, upload-time = "2025-06-02T20:36:08.647Z" }, -] - [[package]] name = "jsonpointer" version = "3.0.0" @@ -3727,18 +3602,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9a/cc/3fe688ff1355010937713164caacf9ed443675ac48a997bab6ed23b3f7c0/matplotlib-3.10.7-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3886e47f64611046bc1db523a09dd0a0a6bed6081e6f90e13806dd1d1d1b5e91", size = 8693919, upload-time = "2025-10-09T00:27:58.41Z" }, ] -[[package]] -name = "matplotlib-inline" -version = "0.1.7" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "traitlets" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" }, -] - [[package]] name = "mcp" version = "1.18.0" @@ -4998,15 +4861,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/aa/0f/c8b64d9b54ea631fcad4e9e3c8dbe8c11bb32a623be94f22974c88e71eaf/parsimonious-0.10.0-py3-none-any.whl", hash = "sha256:982ab435fabe86519b57f6b35610aa4e4e977e9f02a14353edf4bbc75369fc0f", size = 48427, upload-time = "2022-09-03T17:01:13.814Z" }, ] -[[package]] 
-name = "parso" -version = "0.8.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d4/de/53e0bcf53d13e005bd8c92e7855142494f41171b34c2536b86187474184d/parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a", size = 401205, upload-time = "2025-08-23T15:15:28.028Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/16/32/f8e3c85d1d5250232a5d3477a2a28cc291968ff175caeadaf3cc19ce0e4a/parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887", size = 106668, upload-time = "2025-08-23T15:15:25.663Z" }, -] - [[package]] name = "pathspec" version = "0.12.1" @@ -5093,18 +4947,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/db/e0/52b67d4f00e09e497aec4f71bc44d395605e8ebcea52543242ed34c25ef9/pdfplumber-0.11.7-py3-none-any.whl", hash = "sha256:edd2195cca68bd770da479cf528a737e362968ec2351e62a6c0b71ff612ac25e", size = 60029, upload-time = "2025-06-12T11:30:48.89Z" }, ] -[[package]] -name = "pexpect" -version = "4.9.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "ptyprocess" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, -] - [[package]] name = "pi-heif" version = "0.22.0" @@ -5344,18 +5186,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5b/a5/987a405322d78a73b66e39e4a90e4ef156fd7141bf71df987e50717c321b/pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8", size = 220965, upload-time = "2025-08-09T18:56:13.192Z" }, ] -[[package]] -name = "prompt-toolkit" -version = "3.0.52" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "wcwidth" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a1/96/06e01a7b38dce6fe1db213e061a4602dd6032a8a97ef6c1a862537732421/prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855", size = 434198, upload-time = "2025-08-27T15:24:02.057Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/84/03/0d3ce49e2505ae70cf43bc5bb3033955d2fc9f932163e84dc0779cc47f48/prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955", size = 391431, upload-time = "2025-08-27T15:23:59.498Z" }, -] - [[package]] name = "propcache" version = "0.4.1" @@ -5493,8 +5323,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8c/df/16848771155e7c419c60afeb24950b8aaa3ab09c0a091ec3ccca26a574d0/psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c47676e5b485393f069b4d7a811267d3168ce46f988fa602658b8bb901e9e64d", size = 4410873, upload-time = "2025-10-10T11:10:38.951Z" }, { url = 
"https://files.pythonhosted.org/packages/43/79/5ef5f32621abd5a541b89b04231fe959a9b327c874a1d41156041c75494b/psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a28d8c01a7b27a1e3265b11250ba7557e5f72b5ee9e5f3a2fa8d2949c29bf5d2", size = 4468016, upload-time = "2025-10-10T11:10:43.319Z" }, { url = "https://files.pythonhosted.org/packages/f0/9b/d7542d0f7ad78f57385971f426704776d7b310f5219ed58da5d605b1892e/psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f2732cf504a1aa9e9609d02f79bea1067d99edf844ab92c247bbca143303b", size = 4164996, upload-time = "2025-10-10T11:10:46.705Z" }, + { url = "https://files.pythonhosted.org/packages/14/ed/e409388b537fa7414330687936917c522f6a77a13474e4238219fcfd9a84/psycopg2_binary-2.9.11-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:865f9945ed1b3950d968ec4690ce68c55019d79e4497366d36e090327ce7db14", size = 3981881, upload-time = "2025-10-30T02:54:57.182Z" }, { url = "https://files.pythonhosted.org/packages/bf/30/50e330e63bb05efc6fa7c1447df3e08954894025ca3dcb396ecc6739bc26/psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91537a8df2bde69b1c1db01d6d944c831ca793952e4f57892600e96cee95f2cd", size = 3650857, upload-time = "2025-10-10T11:10:50.112Z" }, { url = "https://files.pythonhosted.org/packages/f0/e0/4026e4c12bb49dd028756c5b0bc4c572319f2d8f1c9008e0dad8cc9addd7/psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dca1f356a67ecb68c81a7bc7809f1569ad9e152ce7fd02c2f2036862ca9f66b", size = 3296063, upload-time = "2025-10-10T11:10:54.089Z" }, + { url = "https://files.pythonhosted.org/packages/2c/34/eb172be293c886fef5299fe5c3fcf180a05478be89856067881007934a7c/psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0da4de5c1ac69d94ed4364b6cbe7190c1a70d325f112ba783d83f8440285f152", size = 3043464, upload-time = "2025-10-30T02:55:02.483Z" }, { url = "https://files.pythonhosted.org/packages/18/1c/532c5d2cb11986372f14b798a95f2eaafe5779334f6a80589a68b5fcf769/psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37d8412565a7267f7d79e29ab66876e55cb5e8e7b3bbf94f8206f6795f8f7e7e", size = 3345378, upload-time = "2025-10-10T11:11:01.039Z" }, { url = "https://files.pythonhosted.org/packages/70/e7/de420e1cf16f838e1fa17b1120e83afff374c7c0130d088dba6286fcf8ea/psycopg2_binary-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:c665f01ec8ab273a61c62beeb8cce3014c214429ced8a308ca1fc410ecac3a39", size = 2713904, upload-time = "2025-10-10T11:11:04.81Z" }, { url = "https://files.pythonhosted.org/packages/c7/ae/8d8266f6dd183ab4d48b95b9674034e1b482a3f8619b33a0d86438694577/psycopg2_binary-2.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e8480afd62362d0a6a27dd09e4ca2def6fa50ed3a4e7c09165266106b2ffa10", size = 3756452, upload-time = "2025-10-10T11:11:11.583Z" }, @@ -5502,8 +5334,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/48/89/3fdb5902bdab8868bbedc1c6e6023a4e08112ceac5db97fc2012060e0c9a/psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e164359396576a3cc701ba8af4751ae68a07235d7a380c631184a611220d9a4", size = 4410955, upload-time = "2025-10-10T11:11:21.21Z" }, { url = "https://files.pythonhosted.org/packages/ce/24/e18339c407a13c72b336e0d9013fbbbde77b6fd13e853979019a1269519c/psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:d57c9c387660b8893093459738b6abddbb30a7eab058b77b0d0d1c7d521ddfd7", size = 4468007, upload-time = "2025-10-10T11:11:24.831Z" }, { url = "https://files.pythonhosted.org/packages/91/7e/b8441e831a0f16c159b5381698f9f7f7ed54b77d57bc9c5f99144cc78232/psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2c226ef95eb2250974bf6fa7a842082b31f68385c4f3268370e3f3870e7859ee", size = 4165012, upload-time = "2025-10-10T11:11:29.51Z" }, + { url = "https://files.pythonhosted.org/packages/0d/61/4aa89eeb6d751f05178a13da95516c036e27468c5d4d2509bb1e15341c81/psycopg2_binary-2.9.11-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a311f1edc9967723d3511ea7d2708e2c3592e3405677bf53d5c7246753591fbb", size = 3981881, upload-time = "2025-10-30T02:55:07.332Z" }, { url = "https://files.pythonhosted.org/packages/76/a1/2f5841cae4c635a9459fe7aca8ed771336e9383b6429e05c01267b0774cf/psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb415404821b6d1c47353ebe9c8645967a5235e6d88f914147e7fd411419e6f", size = 3650985, upload-time = "2025-10-10T11:11:34.975Z" }, { url = "https://files.pythonhosted.org/packages/84/74/4defcac9d002bca5709951b975173c8c2fa968e1a95dc713f61b3a8d3b6a/psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f07c9c4a5093258a03b28fab9b4f151aa376989e7f35f855088234e656ee6a94", size = 3296039, upload-time = "2025-10-10T11:11:40.432Z" }, + { url = "https://files.pythonhosted.org/packages/6d/c2/782a3c64403d8ce35b5c50e1b684412cf94f171dc18111be8c976abd2de1/psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00ce1830d971f43b667abe4a56e42c1e2d594b32da4802e44a73bacacb25535f", size = 3043477, upload-time = "2025-10-30T02:55:11.182Z" }, { url = "https://files.pythonhosted.org/packages/c8/31/36a1d8e702aa35c38fc117c2b8be3f182613faa25d794b8aeaab948d4c03/psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cffe9d7697ae7456649617e8bb8d7a45afb71cd13f7ab22af3e5c61f04840908", size = 3345842, upload-time = "2025-10-10T11:11:45.366Z" }, { url = "https://files.pythonhosted.org/packages/6e/b4/a5375cda5b54cb95ee9b836930fea30ae5a8f14aa97da7821722323d979b/psycopg2_binary-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:304fd7b7f97eef30e91b8f7e720b3db75fee010b520e434ea35ed1ff22501d03", size = 2713894, upload-time = "2025-10-10T11:11:48.775Z" }, { url = "https://files.pythonhosted.org/packages/d8/91/f870a02f51be4a65987b45a7de4c2e1897dd0d01051e2b559a38fa634e3e/psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4", size = 3756603, upload-time = "2025-10-10T11:11:52.213Z" }, @@ -5511,8 +5345,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2d/75/364847b879eb630b3ac8293798e380e441a957c53657995053c5ec39a316/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a", size = 4411159, upload-time = "2025-10-10T11:12:00.49Z" }, { url = "https://files.pythonhosted.org/packages/6f/a0/567f7ea38b6e1c62aafd58375665a547c00c608a471620c0edc364733e13/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e", size = 4468234, upload-time = "2025-10-10T11:12:04.892Z" }, { url = 
"https://files.pythonhosted.org/packages/30/da/4e42788fb811bbbfd7b7f045570c062f49e350e1d1f3df056c3fb5763353/psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db", size = 4166236, upload-time = "2025-10-10T11:12:11.674Z" }, + { url = "https://files.pythonhosted.org/packages/3c/94/c1777c355bc560992af848d98216148be5f1be001af06e06fc49cbded578/psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757", size = 3983083, upload-time = "2025-10-30T02:55:15.73Z" }, { url = "https://files.pythonhosted.org/packages/bd/42/c9a21edf0e3daa7825ed04a4a8588686c6c14904344344a039556d78aa58/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3", size = 3652281, upload-time = "2025-10-10T11:12:17.713Z" }, { url = "https://files.pythonhosted.org/packages/12/22/dedfbcfa97917982301496b6b5e5e6c5531d1f35dd2b488b08d1ebc52482/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a", size = 3298010, upload-time = "2025-10-10T11:12:22.671Z" }, + { url = "https://files.pythonhosted.org/packages/66/ea/d3390e6696276078bd01b2ece417deac954dfdd552d2edc3d03204416c0c/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34", size = 3044641, upload-time = "2025-10-30T02:55:19.929Z" }, { url = "https://files.pythonhosted.org/packages/12/9a/0402ded6cbd321da0c0ba7d34dc12b29b14f5764c2fc10750daa38e825fc/psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d", size = 3347940, upload-time = "2025-10-10T11:12:26.529Z" }, { url = "https://files.pythonhosted.org/packages/b1/d2/99b55e85832ccde77b211738ff3925a5d73ad183c0b37bcbbe5a8ff04978/psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d", size = 2714147, upload-time = "2025-10-10T11:12:29.535Z" }, { url = "https://files.pythonhosted.org/packages/ff/a8/a2709681b3ac11b0b1786def10006b8995125ba268c9a54bea6f5ae8bd3e/psycopg2_binary-2.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b8fb3db325435d34235b044b199e56cdf9ff41223a4b9752e8576465170bb38c", size = 3756572, upload-time = "2025-10-10T11:12:32.873Z" }, @@ -5520,30 +5356,14 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/11/32/b2ffe8f3853c181e88f0a157c5fb4e383102238d73c52ac6d93a5c8bffe6/psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8c55b385daa2f92cb64b12ec4536c66954ac53654c7f15a203578da4e78105c0", size = 4411242, upload-time = "2025-10-10T11:12:42.388Z" }, { url = "https://files.pythonhosted.org/packages/10/04/6ca7477e6160ae258dc96f67c371157776564679aefd247b66f4661501a2/psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c0377174bf1dd416993d16edc15357f6eb17ac998244cca19bc67cdc0e2e5766", size = 4468258, upload-time = "2025-10-10T11:12:48.654Z" }, { url = "https://files.pythonhosted.org/packages/3c/7e/6a1a38f86412df101435809f225d57c1a021307dd0689f7a5e7fe83588b1/psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:5c6ff3335ce08c75afaed19e08699e8aacf95d4a260b495a4a8545244fe2ceb3", size = 4166295, upload-time = "2025-10-10T11:12:52.525Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7d/c07374c501b45f3579a9eb761cbf2604ddef3d96ad48679112c2c5aa9c25/psycopg2_binary-2.9.11-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84011ba3109e06ac412f95399b704d3d6950e386b7994475b231cf61eec2fc1f", size = 3983133, upload-time = "2025-10-30T02:55:24.329Z" }, { url = "https://files.pythonhosted.org/packages/82/56/993b7104cb8345ad7d4516538ccf8f0d0ac640b1ebd8c754a7b024e76878/psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba34475ceb08cccbdd98f6b46916917ae6eeb92b5ae111df10b544c3a4621dc4", size = 3652383, upload-time = "2025-10-10T11:12:56.387Z" }, { url = "https://files.pythonhosted.org/packages/2d/ac/eaeb6029362fd8d454a27374d84c6866c82c33bfc24587b4face5a8e43ef/psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b31e90fdd0f968c2de3b26ab014314fe814225b6c324f770952f7d38abf17e3c", size = 3298168, upload-time = "2025-10-10T11:13:00.403Z" }, + { url = "https://files.pythonhosted.org/packages/2b/39/50c3facc66bded9ada5cbc0de867499a703dc6bca6be03070b4e3b65da6c/psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:d526864e0f67f74937a8fce859bd56c979f5e2ec57ca7c627f5f1071ef7fee60", size = 3044712, upload-time = "2025-10-30T02:55:27.975Z" }, { url = "https://files.pythonhosted.org/packages/9c/8e/b7de019a1f562f72ada81081a12823d3c1590bedc48d7d2559410a2763fe/psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04195548662fa544626c8ea0f06561eb6203f1984ba5b4562764fbeb4c3d14b1", size = 3347549, upload-time = "2025-10-10T11:13:03.971Z" }, { url = "https://files.pythonhosted.org/packages/80/2d/1bb683f64737bbb1f86c82b7359db1eb2be4e2c0c13b947f80efefa7d3e5/psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa", size = 2714215, upload-time = "2025-10-10T11:13:07.14Z" }, ] -[[package]] -name = "ptyprocess" -version = "0.7.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, -] - -[[package]] -name = "pure-eval" -version = "0.2.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, -] - [[package]] name = "py-rust-stemmers" version = "0.1.5" @@ -6572,22 
+6392,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, ] -[[package]] -name = "pyvis" -version = "0.3.2" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "ipython", version = "8.37.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "ipython", version = "9.6.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, - { name = "jinja2" }, - { name = "jsonpickle" }, - { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, - { name = "networkx", version = "3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, -] -wheels = [ - { url = "https://files.pythonhosted.org/packages/ab/4b/e37e4e5d5ee1179694917b445768bdbfb084f5a59ecd38089d3413d4c70f/pyvis-0.3.2-py3-none-any.whl", hash = "sha256:5720c4ca8161dc5d9ab352015723abb7a8bb8fb443edeb07f7a322db34a97555", size = 756038, upload-time = "2023-02-24T20:29:46.758Z" }, -] - [[package]] name = "pywin32" version = "311" @@ -7739,20 +7543,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, ] -[[package]] -name = "stack-data" -version = "0.6.3" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "asttokens" }, - { name = "executing" }, - { name = "pure-eval" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, -] - [[package]] name = "stagehand" version = "0.5.0" @@ -8075,15 +7865,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, ] -[[package]] -name = "traitlets" -version = "5.14.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, 
upload-time = "2024-04-19T11:11:46.763Z" }, -] - [[package]] name = "transformers" version = "4.57.1" @@ -8754,15 +8535,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/6e/d4/ed38dd3b1767193de971e694aa544356e63353c33a85d948166b5ff58b9e/watchfiles-1.1.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e6f39af2eab0118338902798b5aa6664f46ff66bc0280de76fca67a7f262a49", size = 457546, upload-time = "2025-10-14T15:06:13.372Z" }, ] -[[package]] -name = "wcwidth" -version = "0.2.14" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, -] - [[package]] name = "weaviate-client" version = "4.17.0"