mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-04-15 07:22:44 +00:00
Compare commits
3 Commits
devin/1739
...
devin/1739
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
583e6584eb | ||
|
|
e1ed85d7bd | ||
|
|
96b6d91084 |
@@ -14,7 +14,6 @@ icon: bars-staggered
|
||||
|
||||
- **Sequential**: Executes tasks sequentially, ensuring tasks are completed in an orderly progression.
|
||||
- **Hierarchical**: Organizes tasks in a managerial hierarchy, where tasks are delegated and executed based on a structured chain of command. A manager language model (`manager_llm`) or a custom manager agent (`manager_agent`) must be specified in the crew to enable the hierarchical process, facilitating the creation and management of tasks by the manager.
|
||||
- **Parallel**: Enables concurrent execution of multiple flows, allowing transitions from one flow to multiple parallel flows for improved task parallelization. Parallel execution is automatically handled using asyncio for optimal performance.
|
||||
- **Consensual Process (Planned)**: Aiming for collaborative decision-making among agents on task execution, this process type introduces a democratic approach to task management within CrewAI. It is planned for future development and is not currently implemented in the codebase.
|
||||
|
||||
## The Role of Processes in Teamwork
|
||||
@@ -58,30 +57,9 @@ Emulates a corporate hierarchy, CrewAI allows specifying a custom manager agent
|
||||
|
||||
## Process Class: Detailed Overview
|
||||
|
||||
The `Process` class is implemented as an enumeration (`Enum`), ensuring type safety and restricting process values to the defined types (`sequential`, `hierarchical`, `parallel`). The consensual process is planned for future inclusion, emphasizing our commitment to continuous development and innovation.
|
||||
|
||||
## Parallel Process
|
||||
|
||||
The parallel process type enables concurrent execution of multiple flows, leveraging Python's asyncio for efficient task parallelization. When using parallel execution:
|
||||
|
||||
- Multiple start methods are executed concurrently
|
||||
- Listeners can run in parallel when triggered by the same method
|
||||
- State consistency is maintained through thread-safe operations
|
||||
- Execution timing and order are preserved where necessary
|
||||
|
||||
Example of parallel flow execution:
|
||||
```python
|
||||
from crewai import Crew, Process
|
||||
|
||||
# Create a crew with parallel process
|
||||
crew = Crew(
|
||||
agents=my_agents,
|
||||
tasks=my_tasks,
|
||||
process=Process.parallel
|
||||
)
|
||||
```
|
||||
The `Process` class is implemented as an enumeration (`Enum`), ensuring type safety and restricting process values to the defined types (`sequential`, `hierarchical`, `parallel`). The consensual process is planned for future inclusion, emphasizing our commitment to continuous development and innovation.
|
||||
|
||||
## Conclusion
|
||||
|
||||
The structured collaboration facilitated by processes within CrewAI is crucial for enabling systematic teamwork among agents.
|
||||
This documentation has been updated to reflect the latest features, enhancements, and the planned integration of the Consensual Process, ensuring users have access to the most current and comprehensive information.
|
||||
@@ -66,7 +66,8 @@ ENV_VARS = {
|
||||
"azure": [
|
||||
{
|
||||
"prompt": "Enter your Azure deployment name (must start with 'azure/')",
|
||||
"key_name": "model",
|
||||
"key_name": "MODEL", # Uppercase MODEL used for consistency across environment variables
|
||||
"validator": lambda x: x.startswith("azure/") or "Model name must start with 'azure/'"
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AZURE API key (press Enter to skip)",
|
||||
@@ -84,7 +85,8 @@ ENV_VARS = {
|
||||
"cerebras": [
|
||||
{
|
||||
"prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
|
||||
"key_name": "model",
|
||||
"key_name": "MODEL", # Uppercase MODEL used for consistency across environment variables
|
||||
"validator": lambda x: x.startswith("cerebras/") or "Model name must start with 'cerebras/'"
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your Cerebras API version (press Enter to skip)",
|
||||
|
||||
@@ -157,10 +157,19 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
|
||||
# Prompt for non-default key-value pairs
|
||||
prompt = details["prompt"]
|
||||
key_name = details["key_name"]
|
||||
api_key_value = click.prompt(prompt, default="", show_default=False)
|
||||
while True:
|
||||
api_key_value = click.prompt(prompt, default="", show_default=False)
|
||||
if not api_key_value.strip():
|
||||
break
|
||||
|
||||
if "validator" in details:
|
||||
validation_result = details["validator"](api_key_value)
|
||||
if isinstance(validation_result, str):
|
||||
click.secho(f"Invalid input: {validation_result}", fg="red")
|
||||
continue
|
||||
|
||||
if api_key_value.strip():
|
||||
env_vars[key_name] = api_key_value
|
||||
break
|
||||
|
||||
if env_vars:
|
||||
write_env_file(folder_path, env_vars)
|
||||
|
||||
@@ -8,5 +8,4 @@ class Process(str, Enum):
|
||||
|
||||
sequential = "sequential"
|
||||
hierarchical = "hierarchical"
|
||||
parallel = "parallel"
|
||||
# TODO: consensual = 'consensual'
|
||||
|
||||
@@ -21,6 +21,38 @@ from crewai.utilities import RPMController
|
||||
from crewai.utilities.events import Emitter
|
||||
|
||||
|
||||
def test_agent_model_env_var():
    """Test MODEL environment variable handling with various cases.

    Verifies that an Agent picks up the model named in the ``MODEL``
    environment variable (for both ``azure/``- and ``cerebras/``-prefixed
    names) and falls back to the default model when ``MODEL`` is unset.
    """
    # Preserve the caller's MODEL value so this test cannot leak state
    # into other tests, even when an assertion below fails.
    original_model = os.environ.get("MODEL")

    test_cases = [
        ("azure/test-model", "azure/test-model"),  # Valid Azure case
        ("azure/minimal", "azure/minimal"),  # Another valid Azure case
        ("cerebras/test-model", "cerebras/test-model"),  # Valid Cerebras case
        ("cerebras/minimal", "cerebras/minimal"),  # Another valid Cerebras case
    ]

    try:
        for input_model, expected_model in test_cases:
            # Set test MODEL value and confirm the Agent's LLM uses it.
            os.environ["MODEL"] = input_model
            agent = Agent(role="test role", goal="test goal", backstory="test backstory")
            assert agent.llm.model == expected_model

        # With MODEL unset, the Agent should fall back to the default model.
        os.environ.pop("MODEL", None)
        agent = Agent(role="test role", goal="test goal", backstory="test backstory")
        assert agent.llm.model == "gpt-4o-mini"  # Default model
    finally:
        # Restore the original environment even if an assertion failed.
        # Compare against None, not truthiness, so an originally-empty
        # MODEL value ("") is restored rather than silently dropped.
        if original_model is not None:
            os.environ["MODEL"] = original_model
        else:
            os.environ.pop("MODEL", None)
|
||||
|
||||
|
||||
def test_agent_llm_creation_with_env_vars():
|
||||
# Store original environment variables
|
||||
original_api_key = os.environ.get("OPENAI_API_KEY")
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
"""Test Flow creation and execution basic functionality."""
|
||||
|
||||
import asyncio
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
import pytest
|
||||
@@ -621,35 +620,3 @@ def test_stateless_flow_event_emission():
|
||||
== "Deeds will not be less valiant because they are unpraised."
|
||||
)
|
||||
assert isinstance(event_log[5].timestamp, datetime)
|
||||
|
||||
|
||||
def test_parallel_flow():
    """Test a flow where multiple listeners execute in parallel."""
    order_of_execution = []
    finish_times = {}

    class ParallelFlow(Flow):
        @start()
        def start_method(self):
            order_of_execution.append("start")
            return "start"

        @listen(start_method)
        async def parallel_1(self):
            await asyncio.sleep(0.1)
            finish_times["parallel_1"] = time.time()
            order_of_execution.append("parallel_1")

        @listen(start_method)
        async def parallel_2(self):
            await asyncio.sleep(0.1)
            finish_times["parallel_2"] = time.time()
            order_of_execution.append("parallel_2")

    flow = ParallelFlow()
    flow.kickoff()

    # The start method and both listeners must all have run.
    for expected in ("start", "parallel_1", "parallel_2"):
        assert expected in order_of_execution

    # Both listeners sleep 0.1s, so finishing within 0.05s of each other
    # is only possible if they ran concurrently rather than sequentially.
    assert abs(finish_times["parallel_1"] - finish_times["parallel_2"]) < 0.05
|
||||
|
||||
Reference in New Issue
Block a user