mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-10 00:28:31 +00:00
Merge pull request #1879 from tonykipkemboi/main
docs: enhance decorator documentation with use cases and examples
This commit is contained in:
@@ -31,7 +31,7 @@ From this point on, your crew will have planning enabled, and the tasks will be
|
|||||||
|
|
||||||
#### Planning LLM
|
#### Planning LLM
|
||||||
|
|
||||||
Now you can define the LLM that will be used to plan the tasks. You can use any available ChatOpenAI LLM model.
|
Now you can define the LLM that will be used to plan the tasks.
|
||||||
|
|
||||||
When running the base case example, you will see something like the output below, which represents the output of the `AgentPlanner`
|
When running the base case example, you will see something like the output below, which represents the output of the `AgentPlanner`
|
||||||
responsible for creating the step-by-step logic to add to the Agents' tasks.
|
responsible for creating the step-by-step logic to add to the Agents' tasks.
|
||||||
@@ -39,7 +39,6 @@ responsible for creating the step-by-step logic to add to the Agents' tasks.
|
|||||||
<CodeGroup>
|
<CodeGroup>
|
||||||
```python Code
|
```python Code
|
||||||
from crewai import Crew, Agent, Task, Process
|
from crewai import Crew, Agent, Task, Process
|
||||||
from langchain_openai import ChatOpenAI
|
|
||||||
|
|
||||||
# Assemble your crew with planning capabilities and custom LLM
|
# Assemble your crew with planning capabilities and custom LLM
|
||||||
my_crew = Crew(
|
my_crew = Crew(
|
||||||
@@ -47,7 +46,7 @@ my_crew = Crew(
|
|||||||
tasks=self.tasks,
|
tasks=self.tasks,
|
||||||
process=Process.sequential,
|
process=Process.sequential,
|
||||||
planning=True,
|
planning=True,
|
||||||
planning_llm=ChatOpenAI(model="gpt-4o")
|
planning_llm="gpt-4o"
|
||||||
)
|
)
|
||||||
|
|
||||||
# Run the crew
|
# Run the crew
|
||||||
|
|||||||
@@ -23,9 +23,7 @@ Processes enable individual agents to operate as a cohesive unit, streamlining t
|
|||||||
To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. For a hierarchical process, be sure to define `manager_llm` or `manager_agent` for the manager agent.
|
To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. For a hierarchical process, be sure to define `manager_llm` or `manager_agent` for the manager agent.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from crewai import Crew
|
from crewai import Crew, Process
|
||||||
from crewai.process import Process
|
|
||||||
from langchain_openai import ChatOpenAI
|
|
||||||
|
|
||||||
# Example: Creating a crew with a sequential process
|
# Example: Creating a crew with a sequential process
|
||||||
crew = Crew(
|
crew = Crew(
|
||||||
@@ -40,7 +38,7 @@ crew = Crew(
|
|||||||
agents=my_agents,
|
agents=my_agents,
|
||||||
tasks=my_tasks,
|
tasks=my_tasks,
|
||||||
process=Process.hierarchical,
|
process=Process.hierarchical,
|
||||||
manager_llm=ChatOpenAI(model="gpt-4")
|
manager_llm="gpt-4o"
|
||||||
# or
|
# or
|
||||||
# manager_agent=my_manager_agent
|
# manager_agent=my_manager_agent
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -73,9 +73,9 @@ result = crew.kickoff()
|
|||||||
If you're using the hierarchical process and don't want to set a custom manager agent, you can specify the language model for the manager:
|
If you're using the hierarchical process and don't want to set a custom manager agent, you can specify the language model for the manager:
|
||||||
|
|
||||||
```python Code
|
```python Code
|
||||||
from langchain_openai import ChatOpenAI
|
from crewai import LLM
|
||||||
|
|
||||||
manager_llm = ChatOpenAI(model_name="gpt-4")
|
manager_llm = LLM(model="gpt-4o")
|
||||||
|
|
||||||
crew = Crew(
|
crew = Crew(
|
||||||
agents=[researcher, writer],
|
agents=[researcher, writer],
|
||||||
|
|||||||
@@ -301,38 +301,166 @@ Use the annotations to properly reference the agent and task in the `crew.py` fi
|
|||||||
|
|
||||||
### Annotations include:
|
### Annotations include:
|
||||||
|
|
||||||
* `@agent`
|
Here are examples of how to use each annotation in your CrewAI project, and when you should use them:
|
||||||
* `@task`
|
|
||||||
* `@crew`
|
|
||||||
* `@tool`
|
|
||||||
* `@before_kickoff`
|
|
||||||
* `@after_kickoff`
|
|
||||||
* `@callback`
|
|
||||||
* `@output_json`
|
|
||||||
* `@output_pydantic`
|
|
||||||
* `@cache_handler`
|
|
||||||
|
|
||||||
```python crew.py
|
#### @agent
|
||||||
# ...
|
Used to define an agent in your crew. Use this when:
|
||||||
|
- You need to create a specialized AI agent with a specific role
|
||||||
|
- You want the agent to be automatically collected and managed by the crew
|
||||||
|
- You need to reuse the same agent configuration across multiple tasks
|
||||||
|
|
||||||
|
```python
|
||||||
@agent
|
@agent
|
||||||
def email_summarizer(self) -> Agent:
|
def research_agent(self) -> Agent:
|
||||||
return Agent(
|
return Agent(
|
||||||
config=self.agents_config["email_summarizer"],
|
role="Research Analyst",
|
||||||
|
goal="Conduct thorough research on given topics",
|
||||||
|
backstory="Expert researcher with years of experience in data analysis",
|
||||||
|
tools=[SerperDevTool()],
|
||||||
|
verbose=True
|
||||||
)
|
)
|
||||||
|
|
||||||
@task
|
|
||||||
def email_summarizer_task(self) -> Task:
|
|
||||||
return Task(
|
|
||||||
config=self.tasks_config["email_summarizer_task"],
|
|
||||||
)
|
|
||||||
# ...
|
|
||||||
```
|
```
|
||||||
|
|
||||||
<Tip>
|
#### @task
|
||||||
In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
|
Used to define a task that can be executed by agents. Use this when:
|
||||||
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
|
- You need to define a specific piece of work for an agent
|
||||||
You can learn more about the core concepts [here](/concepts).
|
- You want tasks to be automatically sequenced and managed
|
||||||
</Tip>
|
- You need to establish dependencies between different tasks
|
||||||
|
|
||||||
|
```python
|
||||||
|
@task
|
||||||
|
def research_task(self) -> Task:
|
||||||
|
return Task(
|
||||||
|
description="Research the latest developments in AI technology",
|
||||||
|
expected_output="A comprehensive report on AI advancements",
|
||||||
|
agent=self.research_agent(),
|
||||||
|
output_file="output/research.md"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### @crew
|
||||||
|
Used to define your crew configuration. Use this when:
|
||||||
|
- You want to automatically collect all @agent and @task definitions
|
||||||
|
- You need to specify how tasks should be processed (sequential or hierarchical)
|
||||||
|
- You want to set up crew-wide configurations
|
||||||
|
|
||||||
|
```python
|
||||||
|
@crew
|
||||||
|
def research_crew(self) -> Crew:
|
||||||
|
return Crew(
|
||||||
|
agents=self.agents, # Automatically collected from @agent methods
|
||||||
|
tasks=self.tasks, # Automatically collected from @task methods
|
||||||
|
process=Process.sequential,
|
||||||
|
verbose=True
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### @tool
|
||||||
|
Used to create custom tools for your agents. Use this when:
|
||||||
|
- You need to give agents specific capabilities (like web search, data analysis)
|
||||||
|
- You want to encapsulate external API calls or complex operations
|
||||||
|
- You need to share functionality across multiple agents
|
||||||
|
|
||||||
|
```python
|
||||||
|
@tool
|
||||||
|
def web_search_tool(query: str, max_results: int = 5) -> list[str]:
|
||||||
|
"""
|
||||||
|
Search the web for information.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
query: The search query
|
||||||
|
max_results: Maximum number of results to return
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of search results
|
||||||
|
"""
|
||||||
|
# Implement your search logic here
|
||||||
|
return [f"Result {i} for: {query}" for i in range(max_results)]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### @before_kickoff
|
||||||
|
Used to execute logic before the crew starts. Use this when:
|
||||||
|
- You need to validate or preprocess input data
|
||||||
|
- You want to set up resources or configurations before execution
|
||||||
|
- You need to perform any initialization logic
|
||||||
|
|
||||||
|
```python
|
||||||
|
@before_kickoff
|
||||||
|
def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
||||||
|
"""Validate and preprocess inputs before the crew starts."""
|
||||||
|
if inputs is None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if 'topic' not in inputs:
|
||||||
|
raise ValueError("Topic is required")
|
||||||
|
|
||||||
|
# Add additional context
|
||||||
|
inputs['timestamp'] = datetime.now().isoformat()
|
||||||
|
inputs['topic'] = inputs['topic'].strip().lower()
|
||||||
|
return inputs
|
||||||
|
```
|
||||||
|
|
||||||
|
#### @after_kickoff
|
||||||
|
Used to process results after the crew completes. Use this when:
|
||||||
|
- You need to format or transform the final output
|
||||||
|
- You want to perform cleanup operations
|
||||||
|
- You need to save or log the results in a specific way
|
||||||
|
|
||||||
|
```python
|
||||||
|
@after_kickoff
|
||||||
|
def process_results(self, result: CrewOutput) -> CrewOutput:
|
||||||
|
"""Process and format the results after the crew completes."""
|
||||||
|
result.raw = result.raw.strip()
|
||||||
|
result.raw = f"""
|
||||||
|
# Research Results
|
||||||
|
Generated on: {datetime.now().isoformat()}
|
||||||
|
|
||||||
|
{result.raw}
|
||||||
|
"""
|
||||||
|
return result
|
||||||
|
```
|
||||||
|
|
||||||
|
#### @callback
|
||||||
|
Used to handle events during crew execution. Use this when:
|
||||||
|
- You need to monitor task progress
|
||||||
|
- You want to log intermediate results
|
||||||
|
- You need to implement custom progress tracking or metrics
|
||||||
|
|
||||||
|
```python
|
||||||
|
@callback
|
||||||
|
def log_task_completion(self, task: Task, output: str):
|
||||||
|
"""Log task completion details for monitoring."""
|
||||||
|
print(f"Task '{task.description}' completed")
|
||||||
|
print(f"Output length: {len(output)} characters")
|
||||||
|
print(f"Agent used: {task.agent.role}")
|
||||||
|
print("-" * 50)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### @cache_handler
|
||||||
|
Used to implement custom caching for task results. Use this when:
|
||||||
|
- You want to avoid redundant expensive operations
|
||||||
|
- You need to implement custom cache storage or expiration logic
|
||||||
|
- You want to persist results between runs
|
||||||
|
|
||||||
|
```python
|
||||||
|
@cache_handler
|
||||||
|
def custom_cache(self, key: str) -> Optional[str]:
|
||||||
|
"""Custom cache implementation for storing task results."""
|
||||||
|
cache_file = f"cache/{key}.json"
|
||||||
|
|
||||||
|
if os.path.exists(cache_file):
|
||||||
|
with open(cache_file, 'r') as f:
|
||||||
|
data = json.load(f)
|
||||||
|
# Check if cache is still valid (e.g., not expired)
|
||||||
|
if datetime.fromisoformat(data['timestamp']) > datetime.now() - timedelta(days=1):
|
||||||
|
return data['result']
|
||||||
|
return None
|
||||||
|
```
|
||||||
|
|
||||||
|
<Note>
|
||||||
|
These decorators are part of the CrewAI framework and help organize your crew's structure by automatically collecting agents, tasks, and handling various lifecycle events.
|
||||||
|
They should be used within a class decorated with `@CrewBase`.
|
||||||
|
</Note>
|
||||||
|
|
||||||
### Replay Tasks from Latest Crew Kickoff
|
### Replay Tasks from Latest Crew Kickoff
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user