Compare commits

..

22 Commits

Author SHA1 Message Date
Brandon Hancock
3cc5ce610e improve type hintings 2025-01-06 16:10:06 -05:00
Brandon Hancock
dff3e94c14 drop old functions 2025-01-06 16:08:50 -05:00
Brandon Hancock
33ca1e3a9e improve 2025-01-06 14:25:05 -05:00
Brandon Hancock
cea5a28db1 adding tests. cleaning up PR. 2025-01-06 13:12:30 -05:00
Brandon Hancock
fadd423771 Fix more failing tests 2025-01-06 11:07:10 -05:00
Brandon Hancock
5e401e3f79 fix failing tests 2025-01-06 10:46:30 -05:00
Brandon Hancock
0aaa466467 More fixing of types 2025-01-06 10:28:42 -05:00
Brandon Hancock
777448b4b2 Merge branch 'main' into brandon/eng-266-conversation-crew-v1 2025-01-06 10:23:56 -05:00
Brandon Hancock
e253789865 fixing type error 2025-01-06 10:13:34 -05:00
Brandon Hancock
0aae59dc1d fix more type errors 2025-01-03 16:42:15 -05:00
Brandon Hancock
b97c4cf2c6 fix llm_utils.py and other type errors 2025-01-03 16:25:59 -05:00
Brandon Hancock
4a794622c7 Fix linting 2025-01-03 15:25:35 -05:00
Brandon Hancock
104e8bc167 Merge branch 'main' into brandon/eng-266-conversation-crew-v1 2025-01-03 15:24:19 -05:00
Brandon Hancock
9e5d4972b9 everything is working for conversation now 2025-01-03 15:18:33 -05:00
Brandon Hancock (bhancock_ai)
3ba15e8bc9 Merge branch 'main' into brandon/eng-266-conversation-crew-v1 2025-01-03 10:41:16 -05:00
Brandon Hancock
0284095ff8 accessing crew directly instead of through uv commands 2024-12-30 14:43:18 -05:00
Brandon Hancock
bcd838a2ff properly return tool call result 2024-12-30 13:32:53 -05:00
Brandon Hancock
5da6d36dd9 Added in Joao's feedback to steer crew chats back towards the purpose of the crew 2024-12-30 11:19:35 -05:00
Brandon Hancock
0e7aa192c0 its alive!! 2024-12-27 13:57:09 -05:00
Brandon Hancock
2f882d68ad high level chat working 2024-12-27 11:21:40 -05:00
Brandon Hancock
2bf5b15f1e core loop should be working and ready for testing. 2024-12-26 14:18:42 -05:00
Brandon Hancock
1c45f730c6 worked on foundation for new conversational crews. Now going to work on chatting. 2024-12-24 14:10:00 -05:00
10 changed files with 8 additions and 83 deletions

View File

@@ -161,7 +161,6 @@ The CLI will initially prompt for API keys for the following services:
* Groq
* Anthropic
* Google Gemini
* SambaNova
When you select a provider, the CLI will prompt you to enter your API key.

View File

@@ -146,19 +146,6 @@ Here's a detailed breakdown of supported models and their capabilities, you can
Groq is known for its fast inference speeds, making it suitable for real-time applications.
</Tip>
</Tab>
<Tab title="SambaNova">
| Model | Context Window | Best For |
|-------|---------------|-----------|
| Llama 3.1 70B/8B | Up to 131,072 tokens | High-performance, large context tasks |
| Llama 3.1 405B | 8,192 tokens | High-performance and output quality |
| Llama 3.2 Series | 8,192 tokens | General-purpose tasks, multimodal |
| Llama 3.3 70B | Up to 131,072 tokens | High-performance and output quality|
| Qwen2 family | 8,192 tokens | High-performance and output quality |
<Tip>
[SambaNova](https://cloud.sambanova.ai/) has several models with fast inference speed at full precision.
</Tip>
</Tab>
<Tab title="Others">
| Provider | Context Window | Key Features |
|----------|---------------|--------------|

View File

@@ -134,23 +134,6 @@ crew = Crew(
)
```
## Memory Configuration Options
If you want to access a specific organization and project, you can set the `org_id` and `project_id` parameters in the memory configuration.
```python Code
from crewai import Crew
crew = Crew(
agents=[...],
tasks=[...],
verbose=True,
memory=True,
memory_config={
"provider": "mem0",
"config": {"user_id": "john", "org_id": "my_org_id", "project_id": "my_project_id"},
},
)
```
## Additional Embedding Providers

View File

@@ -32,7 +32,6 @@ LiteLLM supports a wide range of providers, including but not limited to:
- Cloudflare Workers AI
- DeepInfra
- Groq
- SambaNova
- [NVIDIA NIMs](https://docs.api.nvidia.com/nim/reference/models-1)
- And many more!

View File

@@ -26,7 +26,7 @@ class CrewAgentExecutorMixin:
def _should_force_answer(self) -> bool:
"""Determine if a forced answer is required based on iteration count."""
return self.iterations >= self.max_iter
return (self.iterations >= self.max_iter) and not self.have_forced_answer
def _create_short_term_memory(self, output) -> None:
"""Create and save a short-term memory item if conditions are met."""

View File

@@ -85,12 +85,6 @@ ENV_VARS = {
"key_name": "CEREBRAS_API_KEY",
},
],
"sambanova": [
{
"prompt": "Enter your SambaNovaCloud API key (press Enter to skip)",
"key_name": "SAMBANOVA_API_KEY",
}
],
}
@@ -104,7 +98,6 @@ PROVIDERS = [
"bedrock",
"azure",
"cerebras",
"sambanova",
]
MODELS = {
@@ -163,19 +156,6 @@ MODELS = {
"bedrock/mistral.mistral-7b-instruct-v0:2",
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
],
"sambanova": [
"sambanova/Meta-Llama-3.3-70B-Instruct",
"sambanova/QwQ-32B-Preview",
"sambanova/Qwen2.5-72B-Instruct",
"sambanova/Qwen2.5-Coder-32B-Instruct",
"sambanova/Meta-Llama-3.1-405B-Instruct",
"sambanova/Meta-Llama-3.1-70B-Instruct",
"sambanova/Meta-Llama-3.1-8B-Instruct",
"sambanova/Llama-3.2-90B-Vision-Instruct",
"sambanova/Llama-3.2-11B-Vision-Instruct",
"sambanova/Meta-Llama-3.2-3B-Instruct",
"sambanova/Meta-Llama-3.2-1B-Instruct",
],
}
DEFAULT_LLM_MODEL = "gpt-4o-mini"

View File

@@ -2,7 +2,7 @@ research_task:
description: >
Conduct a thorough research about {topic}
Make sure you find any interesting and relevant information given
the current year is {current_year}.
the current year is 2024.
expected_output: >
A list with 10 bullet points of the most relevant information about {topic}
agent: researcher

View File

@@ -2,8 +2,6 @@
import sys
import warnings
from datetime import datetime
from {{folder_name}}.crew import {{crew_name}}
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
@@ -18,8 +16,7 @@ def run():
Run the crew.
"""
inputs = {
'topic': 'AI LLMs',
'current_year': str(datetime.now().year)
'topic': 'AI LLMs'
}
try:

View File

@@ -76,18 +76,6 @@ LLM_CONTEXT_WINDOW_SIZES = {
"mixtral-8x7b-32768": 32768,
"llama-3.3-70b-versatile": 128000,
"llama-3.3-70b-instruct": 128000,
#sambanova
"Meta-Llama-3.3-70B-Instruct": 131072,
"QwQ-32B-Preview": 8192,
"Qwen2.5-72B-Instruct": 8192,
"Qwen2.5-Coder-32B-Instruct": 8192,
"Meta-Llama-3.1-405B-Instruct": 8192,
"Meta-Llama-3.1-70B-Instruct": 131072,
"Meta-Llama-3.1-8B-Instruct": 131072,
"Llama-3.2-90B-Vision-Instruct": 16384,
"Llama-3.2-11B-Vision-Instruct": 16384,
"Meta-Llama-3.2-3B-Instruct": 4096,
"Meta-Llama-3.2-1B-Instruct": 16384,
}
DEFAULT_CONTEXT_WINDOW_SIZE = 8192

View File

@@ -27,18 +27,10 @@ class Mem0Storage(Storage):
raise ValueError("User ID is required for user memory type")
# API key in memory config overrides the environment variable
config = self.memory_config.get("config", {})
mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
mem0_org_id = config.get("org_id")
mem0_project_id = config.get("project_id")
# Initialize MemoryClient with available parameters
if mem0_org_id and mem0_project_id:
self.memory = MemoryClient(
api_key=mem0_api_key, org_id=mem0_org_id, project_id=mem0_project_id
)
else:
self.memory = MemoryClient(api_key=mem0_api_key)
mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
"MEM0_API_KEY"
)
self.memory = MemoryClient(api_key=mem0_api_key)
def _sanitize_role(self, role: str) -> str:
"""
@@ -65,7 +57,7 @@ class Mem0Storage(Storage):
metadata={"type": "long_term", **metadata},
)
elif self.memory_type == "entities":
entity_name = self._get_agent_name()
entity_name = None
self.memory.add(
value, user_id=entity_name, metadata={"type": "entity", **metadata}
)