Merge remote-tracking branch 'upstream/main'

Braelyn Boynton
2024-05-06 11:50:31 -07:00
11 changed files with 187 additions and 3635 deletions

.gitignore (vendored): 5 lines changed

@@ -8,4 +8,7 @@ assets/*
 test/
 docs_crew/
 chroma.sqlite3
-old_en.json
+old_en.json
+db/
+test.py
+rc-tests/*


@@ -54,7 +54,7 @@ OPENAI_API_KEY=''
 ## Ollama Integration (ex. for using Llama 2 locally)
 1. [Download Ollama](https://ollama.com/download).
-2. After setting up the Ollama, Pull the Llama2 by typing following lines into the terminal ```ollama pull Llama2```.
+2. After setting up the Ollama, Pull the Llama2 by typing following lines into the terminal ```ollama pull llama2```.
 3. Create a ModelFile similar the one below in your project directory.
 ```
 FROM llama2

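Once the model is pulled and a model has been created from the Modelfile, the local LLM can be handed to an agent. A minimal sketch, not part of this diff; the `crewai-llama2` name is an assumption standing in for whatever name you passed to `ollama create`:

```python
from crewai import Agent
from langchain_community.llms import Ollama

# Hypothetical model name; substitute the name you gave `ollama create`.
local_llm = Ollama(model="crewai-llama2")

researcher = Agent(
    role="Local Researcher",
    goal="Answer questions using the locally hosted Llama 2 model",
    backstory="Runs entirely offline through Ollama.",
    llm=local_llm,
)
```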

@@ -41,7 +41,7 @@ Note: Substitute 'https://docs.example.com/reference' with your target documenta
 By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows:
 ```python
-tool = YoutubeVideoSearchTool(
+tool = CodeDocsSearchTool(
     config=dict(
         llm=dict(
             provider="ollama", # or google, openai, anthropic, llama2, ...

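The snippet above is cut off by the diff view. For context, the config block in the crewai-tools docs follows this general shape; the specific model and embedder values below are illustrative assumptions, not taken from this commit:

```python
from crewai_tools import CodeDocsSearchTool

tool = CodeDocsSearchTool(
    config=dict(
        llm=dict(
            provider="ollama",  # or google, openai, anthropic, llama2, ...
            config=dict(
                model="llama2",  # illustrative
            ),
        ),
        embedder=dict(
            provider="google",  # illustrative; any supported embedder works
            config=dict(
                model="models/embedding-001",
                task_type="retrieval_document",
            ),
        ),
    )
)
```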
poetry.lock (generated): 825 lines changed
File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "crewai"
-version = "0.30.0rc3"
+version = "0.30.0rc6"
 description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
 authors = ["Joao Moura <joao@crewai.com>"]
 readme = "README.md"
@@ -23,7 +23,7 @@ opentelemetry-sdk = "^1.22.0"
 opentelemetry-exporter-otlp-proto-http = "^1.22.0"
 instructor = "^0.5.2"
 regex = "^2023.12.25"
-crewai-tools = { version = "^0.2.2", optional = true }
+crewai-tools = { version = "^0.2.3", optional = true }
 click = "^8.1.7"
 python-dotenv = "^1.0.0"
 embedchain = "^0.1.98"
@@ -46,7 +46,7 @@ mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
 mkdocs-material-extensions = "^1.3.1"
 pillow = "^10.2.0"
 cairosvg = "^2.7.1"
-crewai-tools = "^0.2.2"
+crewai-tools = "^0.2.3"
 [tool.isort]
 profile = "black"


@@ -41,6 +41,7 @@ class Crew(BaseModel):
         tasks: List of tasks assigned to the crew.
         agents: List of agents part of this crew.
         manager_llm: The language model that will run manager agent.
+        manager_agent: Custom agent that will be used as manager.
         memory: Whether the crew should use memory to store memories of it's execution.
         manager_callbacks: The callback handlers to be executed by the manager agent when hierarchical process is used
         cache: Whether the crew should use a cache to store the results of the tools execution.
@@ -175,14 +176,23 @@ class Crew(BaseModel):
     @model_validator(mode="after")
     def check_manager_llm(self):
         """Validates that the language model is set when using hierarchical process."""
-        if self.process == Process.hierarchical and (
-            not self.manager_llm and not self.manager_agent
-        ):
-            raise PydanticCustomError(
-                "missing_manager_llm",
-                "Attribute `manager_llm` is required when using hierarchical process.",
-                {},
-            )
+        if self.process == Process.hierarchical:
+            if not self.manager_llm and not self.manager_agent:
+                raise PydanticCustomError(
+                    "missing_manager_llm_or_manager_agent",
+                    "Attribute `manager_llm` or `manager_agent` is required when using hierarchical process.",
+                    {},
+                )
+            if (self.manager_agent is not None) and (
+                self.agents.count(self.manager_agent) > 0
+            ):
+                raise PydanticCustomError(
+                    "manager_agent_in_agents",
+                    "Manager agent should not be included in agents list.",
+                    {},
+                )
         return self
 
     @model_validator(mode="after")
@@ -316,12 +326,13 @@ class Crew(BaseModel):
"""Creates and assigns a manager agent to make sure the crew completes the tasks."""
i18n = I18N(prompt_file=self.prompt_file)
try:
self.manager_agent.allow_delegation = (
True # Forcing Allow delegation to the manager
)
if self.manager_agent is not None:
self.manager_agent.allow_delegation = True
manager = self.manager_agent
except:
if len(manager.tools) > 0:
raise Exception("Manager agent should not have tools")
manager.tools = AgentTools(agents=self.agents).tools()
else:
manager = Agent(
role=i18n.retrieve("hierarchical_manager_agent", "role"),
goal=i18n.retrieve("hierarchical_manager_agent", "goal"),

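Taken together, these crew.py changes let a hierarchical crew be driven by a custom manager agent instead of a bare manager_llm: the manager must not appear in the agents list (enforced by the validator above) and must not carry its own tools (checked when the manager is set up, where it receives the delegation tools automatically). A minimal usage sketch, with illustrative agents and a task that are not part of this diff:

```python
from crewai import Agent, Crew, Process, Task

writer = Agent(
    role="Writer",
    goal="Draft the article",
    backstory="Staff writer on the team.",
)
manager = Agent(
    role="Manager",
    goal="Coordinate the crew and keep tasks on track",
    backstory="Experienced project manager.",
    allow_delegation=False,  # forced to True internally when used as manager
)

crew = Crew(
    agents=[writer],  # the manager is intentionally NOT listed here
    tasks=[Task(description="Write one paragraph", expected_output="A paragraph")],
    process=Process.hierarchical,
    manager_agent=manager,  # used in place of manager_llm
)
result = crew.kickoff()
```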

@@ -1,14 +1,28 @@
tasks_order = []


def memoize(func):
    cache = {}

    def memoized_func(*args, **kwargs):
        key = (args, tuple(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    return memoized_func


def task(func):
    func.is_task = True
    tasks_order.append(func.__name__)
    func = memoize(func)
    return func


def agent(func):
    func.is_agent = True
    func = memoize(func)
    return func


@@ -1,7 +1,7 @@
 import ast
+from difflib import SequenceMatcher
 from textwrap import dedent
 from typing import Any, List, Union
-from difflib import SequenceMatcher
 
 from langchain_core.tools import BaseTool
 from langchain_openai import ChatOpenAI
@@ -330,7 +330,7 @@ class ToolUsage:
         return calling
 
-    def _validate_tool_input(self, tool_input: str) -> dict:
+    def _validate_tool_input(self, tool_input: str) -> str:
         try:
             ast.literal_eval(tool_input)
             return tool_input
@@ -351,13 +351,17 @@ class ToolUsage:
                     continue  # Skip malformed entries
                 key, value = entry.split(":", 1)
-                key = key.strip().strip(
-                    '"'
-                )  # Remove extraneous white spaces and quotes
+                # Remove extraneous white spaces and quotes, replace single quotes
+                key = key.strip().strip('"').replace("'", '"')
                 value = value.strip()
-                # Check and format the value based on its type
-                if value.isdigit():  # Check if value is a digit, hence integer
+                # Handle replacement of single quotes at the start and end of the value string
+                if value.startswith("'") and value.endswith("'"):
+                    value = value[1:-1]  # Remove single quotes
+                    value = (
+                        '"' + value.replace('"', '\\"') + '"'
+                    )  # Re-encapsulate with double quotes
+                elif value.isdigit():  # Check if value is a digit, hence integer
                     formatted_value = value
                 elif value.lower() in [
                     "true",
@@ -367,7 +371,7 @@ class ToolUsage:
                     formatted_value = value.lower()
                 else:
                     # Assume the value is a string and needs quotes
-                    formatted_value = '"' + value.strip('"').replace('"', '\\"') + '"'
+                    formatted_value = '"' + value.replace('"', '\\"') + '"'
 
                 # Rebuild the entry with proper quoting
                 formatted_entry = f'"{key}": {formatted_value}'

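To make the intent of this hunk concrete, here is the kind of almost-JSON tool input the repair path is meant to normalise; the strings below are illustrative, not taken from the tests:

```python
import json

# Input that is NOT a valid Python literal, so the ast.literal_eval fast path fails
# and the manual key/value normalisation above kicks in.
raw = "{query: crewai docs, max_results: 5, verbose: True}"

# After normalisation, keys and string values are double-quoted and booleans are
# lowercased, yielding something JSON-parsable along these lines:
repaired = '{"query": "crewai docs", "max_results": 5, "verbose": true}'
json.loads(repaired)  # parses cleanly
```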
File diff suppressed because it is too large.


@@ -918,8 +918,6 @@ def test_crew_log_file_output(tmp_path):
 def test_manager_agent():
     from unittest.mock import patch
-    from langchain_openai import ChatOpenAI
 
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -930,7 +928,6 @@ def test_manager_agent():
goal="Manage the crew and ensure the tasks are completed efficiently.",
backstory="You're an experienced manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
allow_delegation=False,
llm=ChatOpenAI(temperature=0, model="gpt-4"),
)
crew = Crew(
@@ -944,3 +941,53 @@ def test_manager_agent():
        crew.kickoff()
        assert manager.allow_delegation == True
        execute.assert_called()


def test_manager_agent_in_agents_raises_exception():
    pass
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    manager = Agent(
        role="Manager",
        goal="Manage the crew and ensure the tasks are completed efficiently.",
        backstory="You're an experienced manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
        allow_delegation=False,
    )

    with pytest.raises(pydantic_core._pydantic_core.ValidationError):
        crew = Crew(
            agents=[researcher, writer, manager],
            process=Process.hierarchical,
            manager_agent=manager,
            tasks=[task],
        )


def test_manager_agent_with_tools_raises_exception():
    pass
    task = Task(
        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
        expected_output="5 bullet points with a paragraph for each idea.",
    )

    manager = Agent(
        role="Manager",
        goal="Manage the crew and ensure the tasks are completed efficiently.",
        backstory="You're an experienced manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
        allow_delegation=False,
    )

    crew = Crew(
        agents=[researcher, writer],
        process=Process.hierarchical,
        manager_agent=manager,
        tasks=[task],
    )

    with pytest.raises(Exception):
        crew.kickoff()

tests/project_test.py (new file): 35 lines

@@ -0,0 +1,35 @@
from crewai.agent import Agent
from crewai.project import agent, task
from crewai.task import Task


class SimpleCrew:
    @agent
    def simple_agent(self):
        return Agent(
            role="Simple Agent", goal="Simple Goal", backstory="Simple Backstory"
        )

    @task
    def simple_task(self):
        return Task(description="Simple Description", expected_output="Simple Output")


def test_agent_memoization():
    crew = SimpleCrew()
    first_call_result = crew.simple_agent()
    second_call_result = crew.simple_agent()

    assert (
        first_call_result is second_call_result
    ), "Agent memoization is not working as expected"


def test_task_memoization():
    crew = SimpleCrew()
    first_call_result = crew.simple_task()
    second_call_result = crew.simple_task()

    assert (
        first_call_result is second_call_result
    ), "Task memoization is not working as expected"