adding manager_llm
New binary file: .cache/plugin/social/e580fe32a1d3f15fc89057d053ae3e52.png (39 KiB)
@@ -1,95 +0,0 @@
# Getting Started

To get started with CrewAI, follow these simple steps:

1. **Installation**:

```shell
pip install crewai
```

The example below also uses the DuckDuckGo search tool, so install that package as well:

```shell
pip install duckduckgo-search
```

2. **Setting Up Your Crew**:

```python
import os
from crewai import Agent, Task, Crew, Process

os.environ["OPENAI_API_KEY"] = "YOUR KEY"

# You can choose to use a local model through Ollama, for example.
# See ./docs/llm-connections.md for more information.
# from langchain.llms import Ollama
# ollama_llm = Ollama(model="openhermes")

# Install duckduckgo-search for this example:
# !pip install -U duckduckgo-search

from langchain_community.tools import DuckDuckGoSearchRun
search_tool = DuckDuckGoSearchRun()

# Define your agents with roles and goals
researcher = Agent(
    role='Senior Research Analyst',
    goal='Uncover cutting-edge developments in AI and data science',
    backstory="""You work at a leading tech think tank.
    Your expertise lies in identifying emerging trends.
    You have a knack for dissecting complex data and presenting
    actionable insights.""",
    verbose=True,
    allow_delegation=False,
    tools=[search_tool]
    # You can pass an optional llm attribute specifying which model you want to use.
    # It can be a local model served through Ollama / LM Studio or a remote
    # model like OpenAI, Mistral, Anthropic or others (https://python.langchain.com/docs/integrations/llms/)
    #
    # Examples:
    # llm=ollama_llm # was defined above in the file
    # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
)
writer = Agent(
    role='Tech Content Strategist',
    goal='Craft compelling content on tech advancements',
    backstory="""You are a renowned Content Strategist, known for
    your insightful and engaging articles.
    You transform complex concepts into compelling narratives.""",
    verbose=True,
    allow_delegation=True,
    # (optional) llm=ollama_llm
)

# Create tasks for your agents
task1 = Task(
    description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
    Identify key trends, breakthrough technologies, and potential industry impacts.
    Your final answer MUST be a full analysis report""",
    agent=researcher
)

task2 = Task(
    description="""Using the insights provided, develop an engaging blog
    post that highlights the most significant AI advancements.
    Your post should be informative yet accessible, catering to a tech-savvy audience.
    Make it sound cool, avoid complex words so it doesn't sound like AI.
    Your final answer MUST be the full blog post of at least 4 paragraphs.""",
    agent=writer
)

# Instantiate your crew with a sequential process
crew = Crew(
    agents=[researcher, writer],
    tasks=[task1, task2],
    verbose=2, # You can set it to 1 or 2 for different logging levels
)

# Get your crew to work!
result = crew.kickoff()

print("######################")
print(result)
```

Currently the only supported process is `Process.sequential`, where one task is executed after the other and the outcome of one is passed as extra content into the next.
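If you prefer to be explicit, you can also pass the process when instantiating the crew. A minimal sketch, reusing the `researcher`, `writer`, `task1`, and `task2` objects from the example above (the explicit `process` argument is optional, since sequential is the default; the `sequential_crew` name is just illustrative):

```python
# The same crew as above, with the sequential process set explicitly.
# task1 runs first and its outcome is handed to task2 as extra context.
sequential_crew = Crew(
    agents=[researcher, writer],
    tasks=[task1, task2],
    process=Process.sequential,  # default, shown here for clarity
)

print(sequential_crew.kickoff())
```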
@@ -4,7 +4,15 @@ description: A step-by-step guide to creating a cohesive CrewAI team for your pr
---

## Introduction
Assembling a crew in CrewAI is akin to casting for a play, where each agent plays a unique role. This guide walks you through creating a crew, assigning roles and tasks, and activating them to work in harmony; setting up your environment and initiating your AI crew takes only a few straightforward steps.

## Step 0: Installation
Begin by installing CrewAI and any additional packages required for your project. For instance, the `duckduckgo-search` package is used in this example for enhanced search capabilities.

```shell
pip install crewai
pip install duckduckgo-search
```

## Step 1: Assemble Your Agents
Start by defining your agents with distinct roles, goals, and backstories. These elements not only add depth but also guide their task execution and interaction within the crew, as in the sketch below.
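A minimal sketch of such a definition, assuming the default OpenAI setup from Step 0 (the role, goal, and backstory strings are placeholders, and the commented-out `llm` override assumes `ChatOpenAI` from `langchain_openai`):

```python
from crewai import Agent

# An example agent; adjust the role, goal, and backstory to your project.
researcher = Agent(
    role='Senior Research Analyst',
    goal='Uncover cutting-edge developments in AI and data science',
    backstory='You work at a leading tech think tank and excel at spotting emerging trends.',
    verbose=True,           # log the agent's reasoning while it works
    allow_delegation=False  # this agent does not hand work off to others
    # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)  # optional: override the default model
)
```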
@@ -23,7 +23,11 @@ To utilize the hierarchical process, you must define a crew with a designated ma

!!! note "Tools on the hierarchical process"
    When using the hierarchical process, make sure to assign tools to the agents rather than to the tasks: the manager delegates the tasks, and the agents are the ones executing them, as in the sketch below.
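A minimal sketch of this convention, assuming the `duckduckgo-search` package and the `DuckDuckGoSearchRun` tool used in the Getting Started example (agent and task names and strings here are illustrative):

```python
from crewai import Agent, Task
from langchain_community.tools import DuckDuckGoSearchRun

search_tool = DuckDuckGoSearchRun()

# Tools are attached to the agent; the manager delegates tasks,
# and this agent executes them with its own tools.
researcher = Agent(
    role='Senior Research Analyst',
    goal='Research whatever the manager delegates',
    backstory='A meticulous analyst at a tech think tank.',
    tools=[search_tool],
)

# The task itself carries no tools; the assigned agent brings its own.
research_task = Task(
    description='Summarize the most important recent developments in AI.',
)
```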
!!! note "Manager LLM"
|
||||
A manager will be automatically set for the crew, you don't need to define it. You do need to set the `manager_llm` parameter in the crew though.
|
||||
|
||||
```python
|
||||
from langchain_openai import ChatOpenAI
|
||||
from crewai import Crew, Process, Agent
|
||||
|
||||
# Define your agents, no need to define a manager
|
||||
@@ -42,6 +46,7 @@ writer = Agent(
|
||||
project_crew = Crew(
|
||||
tasks=[...], # Tasks that that manager will figure out how to complete
|
||||
agents=[researcher, writer],
|
||||
manager_llm=ChatOpenAI(temperature=0, model="gpt-4"), # The manager's LLM that will be used internally
|
||||
process=Process.hierarchical # Designating the hierarchical approach
|
||||
)
|
||||
```
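Kicking off a hierarchical crew then works the same way as any other crew; a short sketch continuing the example above:

```python
# The automatically created manager (backed by manager_llm) plans the work,
# delegates tasks to the researcher and writer, and collects the results.
result = project_crew.kickoff()
print(result)
```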
@@ -33,6 +33,21 @@ Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By
<div style="width:30%">
  <h2>How-To Guides</h2>
  <ul>
    <li>
      <a href="./how-to/Creating-a-Crew-and-kick-it-off">
        Getting Started
      </a>
    </li>
    <li>
      <a href="./how-to/Sequential">
        Using Sequential Process
      </a>
    </li>
    <li>
      <a href="./how-to/Hierarchical">
        Using Hierarchical Process
      </a>
    </li>
    <li>
      <a href="./how-to/LLM-Connections">
        Connecting to LLMs
@@ -43,11 +58,6 @@ Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By
        Customizing Agents
      </a>
    </li>
    <li>
      <a href="./how-to/Creating-a-Crew-and-kick-it-off">
        Creating a Crew and kick it off
      </a>
    </li>
    <li>
      <a href="./how-to/Human-Input-on-Execution">
        Human Input on Execution
@@ -58,6 +68,11 @@ Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By
<div style="width:30%">
  <h2>Examples</h2>
  <ul>
    <li>
      <a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/prep-for-a-meeting">
        Prepare for meetings
      </a>
    </li>
    <li>
      <a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner">
        Trip Planner Crew
@@ -126,7 +126,7 @@ nav:
    - Processes: 'core-concepts/Processes.md'
    - Collaboration: 'core-concepts/Collaboration.md'
  - How to Guides:
    - Creating a Crew Automation: 'how-to/Creating-a-Crew-and-kick-it-off.md'
    - Getting Started: 'how-to/Creating-a-Crew-and-kick-it-off.md'
    - Using Sequential Process: 'how-to/Sequential.md'
    - Using Hierarchical Process: 'how-to/Hierarchical.md'
    - Connecting to any LLM: 'how-to/LLM-Connections.md'
@@ -139,6 +139,7 @@ nav:
    - Game Generator: "https://github.com/joaomdmoura/crewAI-examples/tree/main/game-builder-crew"
    - Drafting emails with LangGraph: "https://github.com/joaomdmoura/crewAI-examples/tree/main/CrewAI-LangGraph"
    - Landing Page Generator: "https://github.com/joaomdmoura/crewAI-examples/tree/main/landing_page_generator"
    - Prepare for meetings: "https://github.com/joaomdmoura/crewAI-examples/tree/main/prep-for-a-meeting"
extra_css:
  - stylesheets/output.css
  - stylesheets/extra.css
@@ -30,6 +30,7 @@ class Crew(BaseModel):
    Attributes:
        tasks: List of tasks assigned to the crew.
        agents: List of agents part of this crew.
        manager_llm: The language model that will run the manager agent.
        process: The process flow that the crew will follow (e.g., sequential).
        verbose: Indicates the verbosity level for logging during execution.
        config: Configuration settings for the crew.
@@ -47,6 +48,9 @@ class Crew(BaseModel):
    agents: List[Agent] = Field(default_factory=list)
    process: Process = Field(default=Process.sequential)
    verbose: Union[int, bool] = Field(default=0)
    manager_llm: Optional[Any] = Field(
        description="Language model that will run the manager agent.", default=None
    )
    config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
    id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
    max_rpm: Optional[int] = Field(
@@ -90,6 +94,17 @@ class Crew(BaseModel):
        self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
        return self

    @model_validator(mode="after")
    def check_manager_llm(self):
        """Validates that the manager language model is set when using the hierarchical process."""
        if self.process == Process.hierarchical and not self.manager_llm:
            raise PydanticCustomError(
                "missing_manager_llm",
                "Attribute `manager_llm` is required when using hierarchical process.",
                {},
            )
        return self

    @model_validator(mode="after")
    def check_config(self):
        """Validates that the crew is properly configured with agents and tasks."""
@@ -2,6 +2,7 @@

import json

import pydantic_core
import pytest

from crewai.agent import Agent
@@ -144,6 +145,8 @@ def test_crew_creation():

@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_process():
    from langchain_openai import ChatOpenAI

    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
    )
@@ -151,6 +154,7 @@ def test_hierarchical_process():
    crew = Crew(
        agents=[researcher, writer],
        process=Process.hierarchical,
        manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
        tasks=[task],
    )

@@ -175,6 +179,19 @@ def test_hierarchical_process():
    )


def test_manager_llm_requirement_for_hierarchical_process():
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
    )

    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        Crew(
            agents=[researcher, writer],
            process=Process.hierarchical,
            tasks=[task],
        )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_with_delegating_agents():
    tasks = [