Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-02-19 20:38:13 +00:00

Compare commits: devin/1751... → gl/chore/a (3 commits: 4b2d5633c1, f071966951, 318310bb7a)
.pre-commit-config.yaml

```diff
@@ -5,3 +5,7 @@ repos:
       - id: ruff
         args: ["--fix"]
       - id: ruff-format
+  - repo: https://github.com/commitizen-tools/commitizen
+    rev: v3.13.0
+    hooks:
+      - id: commitizen
```
docs/docs.json

```diff
@@ -94,7 +94,7 @@
       "pages": [
         "en/guides/advanced/customizing-prompts",
         "en/guides/advanced/fingerprinting"
       ]
     }
   ]
@@ -296,7 +296,8 @@
       "en/enterprise/features/webhook-streaming",
       "en/enterprise/features/traces",
       "en/enterprise/features/hallucination-guardrail",
-      "en/enterprise/features/integrations"
+      "en/enterprise/features/integrations",
+      "en/enterprise/features/agent-repositories"
     ]
   },
   {
@@ -373,7 +374,7 @@
       }
     ]
   }
 
   ]
 },
 {
@@ -730,7 +731,7 @@
       }
     ]
   }
 
   ]
 }
 ]
@@ -774,7 +775,7 @@
     "destination": "/en/introduction"
   },
   {
     "source": "/installation",
     "destination": "/en/installation"
   },
   {
```
@@ -526,6 +526,103 @@ agent = Agent(

The context window management feature works automatically in the background. You don't need to call any special functions - just set `respect_context_window` to your preferred behavior and CrewAI handles the rest!
</Note>
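
For example, a minimal sketch of both settings (the role, goal, and backstory values here are illustrative):

```python Code
from crewai import Agent

# Default behavior: oversized context is summarized and execution continues
adaptive_agent = Agent(
    role="Document Analyst",  # illustrative values
    goal="Analyze very large documents",
    backstory="Expert at digesting long reports",
    respect_context_window=True,
)

# Strict behavior: execution stops with an error instead of summarizing
strict_agent = Agent(
    role="Legal Reviewer",  # illustrative values
    goal="Review contracts without losing any detail",
    backstory="Precision matters more than convenience",
    respect_context_window=False,
)
```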

## Direct Agent Interaction with `kickoff()`

Agents can be used directly without going through a task or crew workflow using the `kickoff()` method. This provides a simpler way to interact with an agent when you don't need the full crew orchestration capabilities.

### How `kickoff()` Works

The `kickoff()` method allows you to send messages directly to an agent and get a response, similar to how you would interact with an LLM but with all the agent's capabilities (tools, reasoning, etc.).

```python Code
from crewai import Agent
from crewai_tools import SerperDevTool

# Create an agent
researcher = Agent(
    role="AI Technology Researcher",
    goal="Research the latest AI developments",
    backstory="You track developments in AI and LLMs",  # backstory is a required Agent field
    tools=[SerperDevTool()],
    verbose=True
)

# Use kickoff() to interact directly with the agent
result = researcher.kickoff("What are the latest developments in language models?")

# Access the raw response
print(result.raw)
```

### Parameters and Return Values

| Parameter         | Type                               | Description                                                               |
| :---------------- | :--------------------------------- | :------------------------------------------------------------------------ |
| `messages`        | `Union[str, List[Dict[str, str]]]` | Either a string query or a list of message dictionaries with role/content |
| `response_format` | `Optional[Type[Any]]`              | Optional Pydantic model for structured output                             |

The method returns a `LiteAgentOutput` object with the following properties:

- `raw`: String containing the raw output text
- `pydantic`: Parsed Pydantic model (if a `response_format` was provided)
- `agent_role`: Role of the agent that produced the output
- `usage_metrics`: Token usage metrics for the execution
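
For example, assuming the `researcher` agent defined above (printed values are illustrative):

```python Code
result = researcher.kickoff("Summarize today's AI news in one sentence")

print(result.raw)            # the response text
print(result.agent_role)     # "AI Technology Researcher"
print(result.usage_metrics)  # token usage for this execution
```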

### Structured Output

You can get structured output by providing a Pydantic model as the `response_format`:

```python Code
from pydantic import BaseModel
from typing import List

class ResearchFindings(BaseModel):
    main_points: List[str]
    key_technologies: List[str]
    future_predictions: str

# Get structured output
result = researcher.kickoff(
    "Summarize the latest developments in AI for 2025",
    response_format=ResearchFindings
)

# Access structured data
print(result.pydantic.main_points)
print(result.pydantic.future_predictions)
```

### Multiple Messages

You can also provide a conversation history as a list of message dictionaries:

```python Code
messages = [
    {"role": "user", "content": "I need information about large language models"},
    {"role": "assistant", "content": "I'd be happy to help with that! What specifically would you like to know?"},
    {"role": "user", "content": "What are the latest developments in 2025?"}
]

result = researcher.kickoff(messages)
```

### Async Support

An asynchronous version is available via `kickoff_async()` with the same parameters:

```python Code
import asyncio

async def main():
    result = await researcher.kickoff_async("What are the latest developments in AI?")
    print(result.raw)

asyncio.run(main())
```

<Note>
The `kickoff()` method uses a `LiteAgent` internally, which provides a simpler execution flow while preserving all of the agent's configuration (role, goal, backstory, tools, etc.).
</Note>

## Important Considerations and Best Practices

### Security and Code Execution
docs/en/enterprise/features/agent-repositories.mdx (new file, 155 lines)

@@ -0,0 +1,155 @@
---
title: 'Agent Repositories'
description: 'Learn how to use Agent Repositories to share and reuse your agents across teams and projects'
icon: 'database'
---

Agent Repositories allow enterprise users to store, share, and reuse agent definitions across teams and projects. This feature enables organizations to maintain a centralized library of standardized agents, promoting consistency and reducing duplication of effort.

## Benefits of Agent Repositories

- **Standardization**: Maintain consistent agent definitions across your organization
- **Reusability**: Create an agent once and use it in multiple crews and projects
- **Governance**: Implement organization-wide policies for agent configurations
- **Collaboration**: Enable teams to share and build upon each other's work

## Using Agent Repositories

### Prerequisites

1. You must have a CrewAI account; you can start with the [free plan](https://app.crewai.com).
2. You need to be authenticated using the CrewAI CLI (see the login command below).
3. If you have more than one organization, make sure you are switched to the correct one using the CLI command:

```bash
crewai org switch <org_id>
```
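
If you are not yet authenticated, log in first; recent versions of the CLI provide a `login` command for this:

```bash
crewai login
```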

### Creating and Managing Agents in Repositories

To create and manage agents in repositories, use the Enterprise Dashboard.

### Loading Agents from Repositories

You can load agents from repositories in your code using the `from_repository` parameter:

```python
from crewai import Agent

# Create an agent by loading it from a repository
# The agent is loaded with all its predefined configurations
researcher = Agent(
    from_repository="market-research-agent"
)
```

### Overriding Repository Settings

You can override specific settings from the repository by providing them in the configuration:

```python
researcher = Agent(
    from_repository="market-research-agent",
    goal="Research the latest trends in AI development",  # Override the repository goal
    verbose=True  # Add a setting not in the repository
)
```

### Example: Creating a Crew with Repository Agents

```python
from crewai import Crew, Agent, Task

# Load agents from repositories
researcher = Agent(
    from_repository="market-research-agent"
)

writer = Agent(
    from_repository="content-writer-agent"
)

# Create tasks
research_task = Task(
    description="Research the latest trends in AI",
    expected_output="A summary of the latest AI trends",  # expected_output is a required Task field
    agent=researcher
)

writing_task = Task(
    description="Write a comprehensive report based on the research",
    expected_output="A comprehensive report on AI trends",  # expected_output is a required Task field
    agent=writer
)

# Create the crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task],
    verbose=True
)

# Run the crew
result = crew.kickoff()
```

### Example: Using `kickoff()` with Repository Agents

You can also use repository agents directly with the `kickoff()` method for simpler interactions:

```python
from crewai import Agent
from pydantic import BaseModel
from typing import List

# Define a structured output format
class MarketAnalysis(BaseModel):
    key_trends: List[str]
    opportunities: List[str]
    recommendation: str

# Load an agent from repository
analyst = Agent(
    from_repository="market-analyst-agent",
    verbose=True
)

# Get a free-form response
result = analyst.kickoff("Analyze the AI market in 2025")
print(result.raw)  # Access the raw response

# Get structured output
structured_result = analyst.kickoff(
    "Provide a structured analysis of the AI market in 2025",
    response_format=MarketAnalysis
)

# Access structured data
print(f"Key Trends: {structured_result.pydantic.key_trends}")
print(f"Recommendation: {structured_result.pydantic.recommendation}")
```

## Best Practices

1. **Naming Convention**: Use clear, descriptive names for your repository agents
2. **Documentation**: Include comprehensive descriptions for each agent
3. **Tool Management**: Ensure that tools referenced by repository agents are available in your environment
4. **Access Control**: Manage permissions to ensure only authorized team members can modify repository agents

## Organization Management

To switch between organizations or see your current organization, use the CrewAI CLI:

```bash
# View current organization
crewai org current

# Switch to a different organization
crewai org switch <org_id>

# List all available organizations
crewai org list
```

<Note>
When loading agents from repositories, you must be authenticated and switched to the correct organization. If you receive errors, check your authentication status and organization settings using the CLI commands above.
</Note>

crewai/cli/constants.py

```diff
@@ -103,12 +103,6 @@ ENV_VARS = {
             "key_name": "SAMBANOVA_API_KEY",
         }
     ],
-    "deepseek": [
-        {
-            "prompt": "Enter your DeepSeek API key (press Enter to skip)",
-            "key_name": "DEEPSEEK_API_KEY",
-        }
-    ],
 }
@@ -125,7 +119,6 @@ PROVIDERS = [
     "azure",
     "cerebras",
    "sambanova",
-    "deepseek",
 ]
 
 MODELS = {
@@ -321,13 +314,6 @@ MODELS = {
         "sambanova/Meta-Llama-3.2-3B-Instruct",
         "sambanova/Meta-Llama-3.2-1B-Instruct",
     ],
-    "deepseek": [
-        "deepseek/deepseek-chat",
-        "deepseek/deepseek-coder",
-        "deepseek/deepseek-r1",
-        "deepseek/deepseek-v3",
-        "deepseek/deepseek-reasoner",
-    ],
 }
 
 DEFAULT_LLM_MODEL = "gpt-4o-mini"
```
crewai/llm.py

```diff
@@ -154,11 +154,6 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "gemini/gemma-3-27b-it": 128000,
     # deepseek
     "deepseek-chat": 128000,
-    "deepseek/deepseek-chat": 128000,
-    "deepseek/deepseek-coder": 128000,
-    "deepseek/deepseek-r1": 128000,
-    "deepseek/deepseek-v3": 128000,
-    "deepseek/deepseek-reasoner": 128000,
     # groq
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
```
```diff
@@ -1,114 +0,0 @@
-"""Tests for DeepSeek integration in CrewAI."""
-
-from unittest.mock import patch
-
-from crewai.llm import LLM
-from crewai.cli.constants import ENV_VARS, PROVIDERS, MODELS
-
-
-class TestDeepSeekIntegration:
-    """Test DeepSeek integration in CrewAI."""
-
-    def test_deepseek_in_providers(self):
-        """Test that DeepSeek is included in the providers list."""
-        assert "deepseek" in PROVIDERS
-
-    def test_deepseek_in_env_vars(self):
-        """Test that DeepSeek API key configuration is in ENV_VARS."""
-        assert "deepseek" in ENV_VARS
-        deepseek_config = ENV_VARS["deepseek"]
-        assert len(deepseek_config) == 1
-        assert deepseek_config[0]["key_name"] == "DEEPSEEK_API_KEY"
-        assert "DeepSeek API key" in deepseek_config[0]["prompt"]
-
-    def test_deepseek_in_models(self):
-        """Test that DeepSeek models are included in the models dictionary."""
-        assert "deepseek" in MODELS
-        deepseek_models = MODELS["deepseek"]
-        expected_models = [
-            "deepseek/deepseek-chat",
-            "deepseek/deepseek-coder",
-            "deepseek/deepseek-r1",
-            "deepseek/deepseek-v3",
-            "deepseek/deepseek-reasoner",
-        ]
-        for model in expected_models:
-            assert model in deepseek_models
-
-    def test_llm_creation_with_deepseek_chat(self):
-        """Test creating LLM instance with deepseek-chat model."""
-        llm = LLM(model="deepseek-chat")
-        assert llm.model == "deepseek-chat"
-        assert llm.get_context_window_size() > 0
-
-    def test_llm_creation_with_deepseek_prefix(self):
-        """Test creating LLM instance with deepseek/ prefix."""
-        llm = LLM(model="deepseek/deepseek-chat")
-        assert llm.model == "deepseek/deepseek-chat"
-        assert llm._get_custom_llm_provider() == "deepseek"
-        assert llm.get_context_window_size() > 0
-
-    def test_deepseek_context_window_sizes(self):
-        """Test that all DeepSeek models have context window sizes defined."""
-        from crewai.llm import LLM_CONTEXT_WINDOW_SIZES
-
-        deepseek_models = [
-            "deepseek-chat",
-            "deepseek/deepseek-chat",
-            "deepseek/deepseek-coder",
-            "deepseek/deepseek-r1",
-            "deepseek/deepseek-v3",
-            "deepseek/deepseek-reasoner",
-        ]
-
-        for model in deepseek_models:
-            assert model in LLM_CONTEXT_WINDOW_SIZES
-            assert LLM_CONTEXT_WINDOW_SIZES[model] > 0
-
-    def test_deepseek_models_context_window_consistency(self):
-        """Test that DeepSeek models have consistent context window sizes."""
-        from crewai.llm import LLM_CONTEXT_WINDOW_SIZES
-
-        expected_size = 128000
-        deepseek_models = [
-            "deepseek-chat",
-            "deepseek/deepseek-chat",
-            "deepseek/deepseek-coder",
-            "deepseek/deepseek-r1",
-            "deepseek/deepseek-v3",
-            "deepseek/deepseek-reasoner",
-        ]
-
-        for model in deepseek_models:
-            assert LLM_CONTEXT_WINDOW_SIZES[model] == expected_size
-
-    @patch.dict("os.environ", {"DEEPSEEK_API_KEY": "test-key"})
-    def test_llm_with_deepseek_api_key(self):
-        """Test LLM creation with DeepSeek API key in environment."""
-        llm = LLM(model="deepseek/deepseek-chat")
-        assert llm.model == "deepseek/deepseek-chat"
-        assert llm._get_custom_llm_provider() == "deepseek"
-
-    def test_deepseek_provider_detection(self):
-        """Test that DeepSeek provider is correctly detected from model name."""
-        llm = LLM(model="deepseek/deepseek-chat")
-        provider = llm._get_custom_llm_provider()
-        assert provider == "deepseek"
-
-    def test_deepseek_vs_openrouter_provider_detection(self):
-        """Test provider detection for DeepSeek vs OpenRouter DeepSeek models."""
-        deepseek_llm = LLM(model="deepseek/deepseek-chat")
-        openrouter_llm = LLM(model="openrouter/deepseek/deepseek-chat")
-
-        assert deepseek_llm._get_custom_llm_provider() == "deepseek"
-        assert openrouter_llm._get_custom_llm_provider() == "openrouter"
-
-    def test_all_deepseek_models_can_be_instantiated(self):
-        """Test that all DeepSeek models in MODELS can be instantiated."""
-        deepseek_models = MODELS["deepseek"]
-
-        for model in deepseek_models:
-            llm = LLM(model=model)
-            assert llm.model == model
-            assert llm._get_custom_llm_provider() == "deepseek"
-            assert llm.get_context_window_size() > 0
```