mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-10 16:48:30 +00:00
Merge branch 'main' into feat/add-event-emitters-to-flows
@@ -29,7 +29,7 @@ Large Language Models (LLMs) are the core intelligence behind CrewAI agents. The
 
 ## Available Models and Their Capabilities
 
-Here's a detailed breakdown of supported models and their capabilities:
+Here's a detailed breakdown of supported models and their capabilities; you can compare performance at [lmarena.ai](https://lmarena.ai/):
 
 <Tabs>
   <Tab title="OpenAI">
@@ -43,6 +43,17 @@ Here's a detailed breakdown of supported models and their capabilities:
     1 token ≈ 4 characters in English. For example, 8,192 tokens ≈ 32,768 characters or about 6,000 words.
     </Note>
   </Tab>
+  <Tab title="Gemini">
+    | Model | Context Window | Best For |
+    |-------|---------------|-----------|
+    | Gemini 1.5 Flash | 1M tokens | Balanced multimodal model, good for most tasks |
+    | Gemini 1.5 Flash 8B | 1M tokens | Fastest and most cost-efficient, good for high-frequency tasks |
+    | Gemini 1.5 Pro | 2M tokens | Best performing, handles a wide variety of reasoning tasks including logical reasoning, coding, and creative collaboration |
+
+    <Tip>
+    Google's Gemini models are all multimodal, supporting audio, images, video, and text, with context caching, JSON schema output, function calling, and more.
+    </Tip>
+  </Tab>
   <Tab title="Groq">
     | Model | Context Window | Best For |
     |-------|---------------|-----------|
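A quick check of the arithmetic in the note above: at roughly 4 characters per token, the context windows in these tables translate into character budgets as sketched below. This is a minimal illustration; the 4-chars-per-token ratio is the note's English-text approximation, not a tokenizer guarantee.

```python
# Token-to-character arithmetic from the note above; the 4-chars-per-token
# ratio is an approximation for English text, not an exact tokenizer property.
def approx_chars(tokens: int, chars_per_token: int = 4) -> int:
    return tokens * chars_per_token

print(approx_chars(8_192))      # 32768   -> the ~6,000-word example in the note
print(approx_chars(1_048_576))  # 4194304 -> Gemini 1.5 Flash's 1M-token window
print(approx_chars(2_097_152))  # 8388608 -> Gemini 1.5 Pro's 2M-token window
```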
@@ -128,10 +139,10 @@ There are three ways to configure LLMs in CrewAI. Choose the method that best fi
       # llm: anthropic/claude-2.1
       # llm: anthropic/claude-2.0
 
-      # Google Models - Good for general tasks
-      # llm: gemini/gemini-pro
+      # Google Models - Strong reasoning, large cacheable context window, multimodal
       # llm: gemini/gemini-1.5-pro-latest
-      # llm: gemini/gemini-1.0-pro-latest
+      # llm: gemini/gemini-1.5-flash-latest
+      # llm: gemini/gemini-1.5-flash-8b-latest
 
       # AWS Bedrock Models - Enterprise-grade
       # llm: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
@@ -350,13 +361,18 @@ Learn how to get the most out of your LLM configuration:
 
     <Accordion title="Google">
         ```python Code
+        # Option 1. Gemini accessed with an API key.
+        # https://ai.google.dev/gemini-api/docs/api-key
         GEMINI_API_KEY=<your-api-key>
+
+        # Option 2. Vertex AI IAM credentials for Gemini, Anthropic, and anything in the Model Garden.
+        # https://cloud.google.com/vertex-ai/generative-ai/docs/overview
         ```
 
         Example usage:
         ```python Code
         llm = LLM(
-            model="gemini/gemini-pro",
+            model="gemini/gemini-1.5-pro-latest",
             temperature=0.7
         )
         ```
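To show where an LLM configured this way ends up, here is a hedged sketch of attaching it to an agent. The role, goal, and backstory values are illustrative placeholders, not from the diff; it assumes the standard `Agent` constructor exported by `crewai`.

```python
from crewai import Agent, LLM

# Sketch: attach the Gemini LLM configured above to an agent.
# Field values below are illustrative placeholders.
llm = LLM(
    model="gemini/gemini-1.5-pro-latest",
    temperature=0.7,
)

researcher = Agent(
    role="Researcher",
    goal="Summarize recent findings on a topic",
    backstory="An analyst who favors concise, well-sourced answers",
    llm=llm,
)
```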
@@ -117,7 +117,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
 
         published_handle = publish_response.json()["handle"]
         console.print(
-            f"Succesfully published {published_handle} ({project_version}).\nInstall it in other projects with crewai tool install {published_handle}",
+            f"Successfully published {published_handle} ({project_version}).\nInstall it in other projects with crewai tool install {published_handle}",
             style="bold green",
         )
 
@@ -138,7 +138,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
 
         self._add_package(get_response.json())
 
-        console.print(f"Succesfully installed {handle}", style="bold green")
+        console.print(f"Successfully installed {handle}", style="bold green")
 
     def login(self):
         login_response = self.plus_api_client.login_to_tool_repository()
@@ -38,7 +38,7 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
         if not path.exists():
             self._logger.log(
                 "error",
-                f"File not found: {path}. Try adding sources to the knowledge directory. If its inside the knowledge directory, use the relative path.",
+                f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
                 color="red",
             )
             raise FileNotFoundError(f"File not found: {path}")
@@ -43,6 +43,10 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-4-turbo": 128000,
     "o1-preview": 128000,
     "o1-mini": 128000,
+    # gemini
+    "gemini-1.5-pro": 2097152,
+    "gemini-1.5-flash": 1048576,
+    "gemini-1.5-flash-8b": 1048576,
     # deepseek
     "deepseek-chat": 128000,
     # groq
@@ -61,6 +65,9 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "mixtral-8x7b-32768": 32768,
 }
 
+DEFAULT_CONTEXT_WINDOW_SIZE = 8192
+CONTEXT_WINDOW_USAGE_RATIO = 0.75
+
 
 @contextmanager
 def suppress_warnings():
@@ -124,6 +131,7 @@ class LLM:
         self.api_version = api_version
         self.api_key = api_key
         self.callbacks = callbacks
+        self.context_window_size = 0
         self.kwargs = kwargs
 
         litellm.drop_params = True
@@ -191,7 +199,16 @@ class LLM:
 
     def get_context_window_size(self) -> int:
         # Only using 75% of the context window size to avoid cutting the message in the middle
-        return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model, 8192) * 0.75)
+        if self.context_window_size != 0:
+            return self.context_window_size
+
+        self.context_window_size = int(
+            DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
+        )
+        for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
+            if self.model.startswith(key):
+                self.context_window_size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
+        return self.context_window_size
 
     def set_callbacks(self, callbacks: List[Any]):
         callback_types = [type(callback) for callback in callbacks]
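The rewritten `get_context_window_size` memoizes its result in `self.context_window_size` and switches from an exact dict lookup to prefix matching, so versioned model ids still resolve to a known window. A standalone sketch of the lookup under the same constants follows; the dict is a subset and the model ids passed in are illustrative.

```python
# Subset of LLM_CONTEXT_WINDOW_SIZES from the diff; model ids below are illustrative.
LLM_CONTEXT_WINDOW_SIZES = {
    "gemini-1.5-pro": 2097152,
    "gemini-1.5-flash": 1048576,
    "gpt-4-turbo": 128000,
}
DEFAULT_CONTEXT_WINDOW_SIZE = 8192
CONTEXT_WINDOW_USAGE_RATIO = 0.75  # keep 25% headroom so messages aren't cut mid-stream


def usable_window(model: str) -> int:
    # Start from the default, then let any matching prefix override it.
    size = int(DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO)
    for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
        if model.startswith(key):
            size = int(value * CONTEXT_WINDOW_USAGE_RATIO)
    return size


print(usable_window("gemini-1.5-flash-8b-latest"))  # 786432, via the "gemini-1.5-flash" prefix
print(usable_window("unknown-model"))               # 6144, the 75% default
```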
@@ -44,14 +44,14 @@ class BaseAgentTool(BaseTool):
                 if available_agent.role.casefold().replace("\n", "") == agent_name
             ]
         except Exception as _:
-            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
+            return self.i18n.errors("agent_tool_unexisting_coworker").format(
                 coworkers="\n".join(
                     [f"- {agent.role.casefold()}" for agent in self.agents]
                 )
             )
 
         if not agent:
-            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
+            return self.i18n.errors("agent_tool_unexisting_coworker").format(
                 coworkers="\n".join(
                     [f"- {agent.role.casefold()}" for agent in self.agents]
                 )
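Beyond the key rename, the surrounding lookup is worth noting: coworker roles are compared after `casefold()` and newline removal, so matching is case-insensitive and tolerant of role names that were wrapped across lines. A small sketch of that normalization, with illustrative role strings:

```python
def normalize_role(role: str) -> str:
    # Same normalization as the comparison above: case-insensitive, newlines stripped.
    return role.casefold().replace("\n", "")


assert normalize_role("Senior Researcher\n") == "senior researcher"
assert normalize_role("SENIOR RESEARCHER") == "senior researcher"
```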
@@ -28,7 +28,7 @@
     "errors": {
         "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",
         "force_final_answer": "Now it's time you MUST give your absolute best final answer. You'll ignore all previous instructions, stop using any tools, and just return your absolute BEST Final answer.",
-        "agent_tool_unexsiting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
+        "agent_tool_unexisting_coworker": "\nError executing tool. coworker mentioned not found, it must be one of the following options:\n{coworkers}\n",
         "task_repeated_usage": "I tried reusing the same input, I must stop using this action input. I'll try something else instead.\n\n",
         "tool_usage_error": "I encountered an error: {error}",
         "tool_arguments_error": "Error: the Action Input is not a valid key, value dictionary.",
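The renamed key keeps the same template shape, so the `.format(coworkers=...)` call sites shown earlier continue to work. A tiny sketch, with the template copied from the diff and an illustrative coworker list:

```python
# Template text copied from the diff; the coworker list is illustrative.
template = (
    "\nError executing tool. coworker mentioned not found, "
    "it must be one of the following options:\n{coworkers}\n"
)
print(template.format(coworkers="- researcher\n- writer"))
```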
@@ -85,7 +85,7 @@ def test_install_success(mock_get, mock_subprocess_run):
         env=unittest.mock.ANY
     )
 
-    assert "Succesfully installed sample-tool" in output
+    assert "Successfully installed sample-tool" in output
 
 
 @patch("crewai.cli.plus_api.PlusAPI.get_tool")
@@ -26,7 +26,7 @@
     },
     "errors": {
         "force_final_answer": "Lorem ipsum dolor sit amet",
-        "agent_tool_unexsiting_coworker": "Lorem ipsum dolor sit amet",
+        "agent_tool_unexisting_coworker": "Lorem ipsum dolor sit amet",
         "task_repeated_usage": "Lorem ipsum dolor sit amet",
        "tool_usage_error": "Lorem ipsum dolor sit amet",
        "tool_arguments_error": "Lorem ipsum dolor sit amet",