Refactor Codebase to Use Pydantic v2 and Enhance Type Hints and Documentation (#24)

Update to Pydantic v2:

Transitioned all references from pydantic.v1 to pydantic (v2), ensuring compatibility with the latest Pydantic features and improvements.
Affected components include the agent, agent tools, prompts, crew, and task modules (see the import sketch below).
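For illustration, a minimal sketch of the import migration (the names mirror the agent-module diff further down; the old v1 line is shown commented out):

# Before: Pydantic v1 compatibility shim
# from pydantic.v1 import BaseModel, Field, PrivateAttr, root_validator

# After: native Pydantic v2 API
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
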
Refactoring & Alignment with Pydantic Standards:

Refactored the agent module away from a custom __init__, moving initialization into Pydantic validators to align more closely with Pydantic best practices.
Updated the crew module to Pydantic v2 and enhanced its configuration handling so that config can be supplied as either a JSON string or a dictionary. Additionally, some (not all) exceptions have been migrated to PydanticCustomError to leverage Pydantic's error-handling capabilities (see the sketch below).
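As a hedged illustration of the new configuration handling, here is a minimal sketch built around a hypothetical stand-in model (CrewConfigSketch is not part of the codebase; only the config field, the JSON/dict coercion, and the PydanticCustomError usage mirror the crew diff below):

import json
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel, Field, field_validator, model_validator
from pydantic_core import PydanticCustomError


class CrewConfigSketch(BaseModel):
    # Illustrative stand-in for Crew; only the config plumbing is shown.
    config: Optional[Dict[str, Any]] = Field(
        default=None, description="Configuration of the crew."
    )

    @field_validator("config", mode="before")
    @classmethod
    def _coerce_config(cls, v: Union[str, Dict[str, Any], None]):
        # Accept either a JSON string or a plain dict and normalize to a dict.
        if isinstance(v, str):
            return json.loads(v)
        return v

    @model_validator(mode="after")
    def _check_config(self) -> "CrewConfigSketch":
        # Pydantic-native error instead of a bare ValueError.
        if self.config is not None and (
            "agents" not in self.config or "tasks" not in self.config
        ):
            raise PydanticCustomError(
                "missing_keys_in_config", "Config should have agents and tasks", {}
            )
        return self


# Both call styles are accepted:
CrewConfigSketch(config={"agents": [], "tasks": []})
CrewConfigSketch(config='{"agents": [], "tasks": []}')
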
Enhancements to Validators and Typings:

Improved validators and type annotations across multiple modules, enhancing code readability and maintainability.
Streamlined the validation process in line with Pydantic v2's methodology, replacing root_validator with field_validator and model_validator (a minimal before/after sketch follows).
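A minimal before/after sketch of that migration, using a hypothetical AgentSketch model (the real Agent class appears in the diff below; ChatOpenAI and AgentExecutor are replaced by plain placeholders to keep the example self-contained):

from typing import Optional

from pydantic import BaseModel, model_validator

# Pydantic v1 style, as removed by this commit:
#
#     @root_validator(pre=True)
#     def check_llm(_cls, values):
#         if not values.get("llm"):
#             values["llm"] = ChatOpenAI(temperature=0.7, model_name="gpt-4")
#         return values


class AgentSketch(BaseModel):
    role: str
    # Placeholder for the AgentExecutor instance held by the real class.
    agent_executor: Optional[str] = None

    @model_validator(mode="after")
    def check_agent_executor(self) -> "AgentSketch":
        # An "after" validator works on the constructed instance, replacing
        # the dict-of-values juggling done by root_validator(pre=True).
        if self.agent_executor is None:
            self.agent_executor = f"executor-for-{self.role}"
        return self


agent = AgentSketch(role="researcher")
assert agent.agent_executor == "executor-for-researcher"
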
Import and Configuration Adjustments:

Switched the test modules to absolute imports because Pytest had trouble resolving the package through relative imports (example below).
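For example (import paths taken from the test diffs below; this assumes the crewai package is installed in the test environment rather than reached through relative paths):

# Before: relative imports that Pytest struggled to resolve
# from ..crewai import Agent, Crew, Process, Task

# After: absolute imports against the installed crewai package
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.process import Process
from crewai.task import Task
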
Greyson LaLonde authored on 2023-12-29 19:24:30 -05:00, committed by GitHub
parent 3b5515c5c2
commit 5cc230263c
11 changed files with 157 additions and 112 deletions

.gitignore

@@ -3,4 +3,5 @@
__pycache__
dist/
.env
assets/*
assets/*
.idea

@@ -1,5 +1,4 @@
"""Generic agent."""
from typing import Any, List, Optional
from langchain.agents import AgentExecutor
@@ -8,14 +7,13 @@ from langchain.agents.output_parsers import ReActSingleInputOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from pydantic.v1 import BaseModel, Field, PrivateAttr, root_validator
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
from .prompts import Prompts
class Agent(BaseModel):
"""
Represents an agent in a system.
"""Represents an agent in a system.
Each agent has a role, a goal, a backstory, and an optional language model (llm).
The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
@@ -31,31 +29,45 @@ class Agent(BaseModel):
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
"""
agent_executor: AgentExecutor = None
agent_executor: Optional[InstanceOf[AgentExecutor]] = Field(
default=None, description="An instance of the AgentExecutor class."
)
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
llm: Optional[Any] = Field(description="LLM that will run the agent")
llm: Optional[Any] = Field(
default_factory=lambda: ChatOpenAI(
temperature=0.7,
model_name="gpt-4",
),
description="Language model that will run the agent.",
)
memory: bool = Field(
description="Whether the agent should have memory or not", default=True
default=True, description="Whether the agent should have memory or not"
)
verbose: bool = Field(
description="Verbose mode for the Agent Execution", default=False
default=False, description="Verbose mode for the Agent Execution"
)
allow_delegation: bool = Field(
description="Allow delegation of tasks to agents", default=True
default=True, description="Allow delegation of tasks to agents"
)
tools: List[Any] = Field(
default_factory=list, description="Tools at agents disposal"
)
tools: List[Any] = Field(description="Tools at agents disposal", default=[])
_task_calls: List[Any] = PrivateAttr()
@root_validator(pre=True)
def check_llm(_cls, values):
if not values.get("llm"):
values["llm"] = ChatOpenAI(temperature=0.7, model_name="gpt-4")
return values
@model_validator(mode="after")
def check_agent_executor(self) -> "Agent":
if not self.agent_executor:
self.agent_executor = self._create_agent_executor()
return self
def __init__(self, **data):
super().__init__(**data)
def _create_agent_executor(self) -> AgentExecutor:
"""Create an agent executor for the agent.
Returns:
An instance of the AgentExecutor class.
"""
agent_args = {
"input": lambda x: x["input"],
"tools": lambda x: x["tools"],
@@ -89,17 +101,20 @@ class Agent(BaseModel):
agent_args | execution_prompt | bind | ReActSingleInputOutputParser()
)
self.agent_executor = AgentExecutor(agent=inner_agent, **executor_args)
return AgentExecutor(agent=inner_agent, **executor_args)
def execute_task(
self, task: str, context: str = None, tools: List[Any] = None
) -> str:
"""
Execute a task with the agent.
Parameters:
task (str): Task to execute
Returns:
output (str): Output of the agent
"""Execute a task with the agent.
Args:
task: Task to execute.
context: Context to execute the task in.
tools: Tools to use for the task.
Returns:
Output of the agent
"""
if context:
task = "\n".join(
@@ -116,5 +131,6 @@ class Agent(BaseModel):
}
)["output"]
def __tools_names(self, tools) -> str:
@staticmethod
def __tools_names(tools) -> str:
return ", ".join([t.name for t in tools])

@@ -1,7 +1,8 @@
import json
from typing import List, Optional
from typing import Any, Dict, List, Optional, Union
from pydantic.v1 import BaseModel, Field, Json, root_validator
from pydantic import BaseModel, Field, Json, field_validator, model_validator
from pydantic_core import PydanticCustomError
from .agent import Agent
from .process import Process
@@ -10,62 +11,69 @@ from .tools.agent_tools import AgentTools
class Crew(BaseModel):
"""
Class that represents a group of agents, how they should work together and
their tasks.
"""
"""Class that represents a group of agents, how they should work together and their tasks."""
tasks: Optional[List[Task]] = Field(description="List of tasks")
agents: Optional[List[Agent]] = Field(description="List of agents in this crew.")
tasks: List[Task] = Field(description="List of tasks", default_factory=list)
agents: List[Agent] = Field(
description="List of agents in this crew.", default_factory=list
)
process: Process = Field(
description="Process that the crew will follow.", default=Process.sequential
)
verbose: bool = Field(
description="Verbose mode for the Agent Execution", default=False
)
config: Optional[Json] = Field(
config: Optional[Union[Json, Dict[str, Any]]] = Field(
description="Configuration of the crew.", default=None
)
@root_validator(pre=True)
def check_config(_cls, values):
if not values.get("config") and (
not values.get("agents") and not values.get("tasks")
):
raise ValueError("Either agents and task need to be set or config.")
@classmethod
@field_validator("config", mode="before")
def check_config_type(cls, v: Union[Json, Dict[str, Any]]):
if isinstance(v, Json):
return json.loads(v)
return v
if values.get("config"):
config = json.loads(values.get("config"))
if not config.get("agents") or not config.get("tasks"):
raise ValueError("Config should have agents and tasks.")
@model_validator(mode="after")
def check_config(self):
if not self.config and not self.tasks and not self.agents:
raise PydanticCustomError(
"missing_keys", "Either agents and task need to be set or config.", {}
)
values["agents"] = [Agent(**agent) for agent in config["agents"]]
if self.config:
if not self.config.get("agents") or not self.config.get("tasks"):
raise PydanticCustomError(
"missing_keys_in_config", "Config should have agents and tasks", {}
)
self.agents = [Agent(**agent) for agent in self.config["agents"]]
tasks = []
for task in config["tasks"]:
task_agent = [
agt for agt in values["agents"] if agt.role == task["agent"]
][0]
for task in self.config["tasks"]:
task_agent = [agt for agt in self.agents if agt.role == task["agent"]][
0
]
del task["agent"]
tasks.append(Task(**task, agent=task_agent))
values["tasks"] = tasks
return values
self.tasks = tasks
return self
def kickoff(self) -> str:
"""
Kickoff the crew to work on it's tasks.
Returns:
output (List[str]): Output of the crew for each task.
"""Kickoff the crew to work on its tasks.
Returns:
Output of the crew for each task.
"""
if self.process == Process.sequential:
return self.__sequential_loop()
def __sequential_loop(self) -> str:
"""
Loop that executes the sequential process.
Returns:
output (str): Output of the crew.
"""Loop that executes the sequential process.
Returns:
Output of the crew.
"""
task_outcome = None
for task in self.tasks:

@@ -4,7 +4,7 @@ from textwrap import dedent
from typing import ClassVar
from langchain.prompts import PromptTemplate
from pydantic.v1 import BaseModel
from pydantic import BaseModel
class Prompts(BaseModel):

@@ -1,35 +1,33 @@
from typing import List, Optional
from typing import Any, List, Optional
from langchain.tools import Tool
from pydantic.v1 import BaseModel, Field, root_validator
from pydantic import BaseModel, Field, model_validator
from .agent import Agent
class Task(BaseModel):
"""
Class that represent a task to be executed.
"""
"""Class that represent a task to be executed."""
description: str = Field(description="Description of the actual task.")
agent: Optional[Agent] = Field(
description="Agent responsible for the task.", default=None
)
tools: Optional[List[Tool]] = Field(
description="Tools the agent are limited to use for this task.", default=[]
tools: List[Any] = Field(
default_factory=list,
description="Tools the agent are limited to use for this task.",
)
@root_validator(pre=False)
def _set_tools(_cls, values):
if (values.get("agent")) and not (values.get("tools")):
values["tools"] = values.get("agent").tools
return values
@model_validator(mode="after")
def check_tools(self):
if not self.tools and (self.agent and self.agent.tools):
self.tools.extend(self.agent.tools)
return self
def execute(self, context: str = None) -> str:
"""
Execute the task.
Returns:
output (str): Output of the task.
"""Execute the task.
Returns:
Output of the task.
"""
if self.agent:
return self.agent.execute_task(

@@ -2,7 +2,7 @@ from textwrap import dedent
from typing import List
from langchain.tools import Tool
from pydantic.v1 import BaseModel, Field
from pydantic import BaseModel, Field
from ..agent import Agent

@@ -3,7 +3,7 @@
import pytest
from langchain.chat_models import ChatOpenAI as OpenAI
from ..crewai import Agent
from crewai.agent import Agent
def test_agent_creation():

@@ -1,56 +1,74 @@
"""Test Agent creation and execution basic functionality."""
import pytest
from ...crewai import Agent
from ...crewai.tools.agent_tools import AgentTools
from crewai.agent import Agent
from crewai.tools.agent_tools import AgentTools
researcher = Agent(
role="researcher",
goal="make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology",
allow_delegation=False
role="researcher",
goal="make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology",
allow_delegation=False,
)
tools = AgentTools(agents=[researcher])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work():
result = tools.delegate_work(
command="researcher|share your take on AI Agents|I heard you hate them"
)
result = tools.delegate_work(
command="researcher|share your take on AI Agents|I heard you hate them"
)
assert (
result
== "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
)
assert result == "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question():
result = tools.ask_question(
command="researcher|do you hate AI Agents?|I heard you LOVE them"
)
result = tools.ask_question(
command="researcher|do you hate AI Agents?|I heard you LOVE them"
)
assert (
result
== "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
)
assert result == "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
def test_can_not_self_delegate():
# TODO: Add test for self delegation
pass
# TODO: Add test for self delegation
pass
def test_delegate_work_with_wrong_input():
result = tools.ask_question(
command="writer|share your take on AI Agents"
)
result = tools.ask_question(command="writer|share your take on AI Agents")
assert (
result
== "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`."
)
assert result == "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`."
def test_delegate_work_to_wrong_agent():
result = tools.ask_question(
command="writer|share your take on AI Agents|I heard you hate them"
)
result = tools.ask_question(
command="writer|share your take on AI Agents|I heard you hate them"
)
assert (
result
== "Error executing tool. Co-worker not found, double check the co-worker."
)
assert result == "Error executing tool. Co-worker not found, double check the co-worker."
def test_ask_question_to_wrong_agent():
result = tools.ask_question(
command="writer|do you hate AI Agents?|I heard you LOVE them"
)
assert result == "Error executing tool. Co-worker not found, double check the co-worker."
result = tools.ask_question(
command="writer|do you hate AI Agents?|I heard you LOVE them"
)
assert (
result
== "Error executing tool. Co-worker not found, double check the co-worker."
)

@@ -4,7 +4,10 @@ import json
import pytest
from ..crewai import Agent, Crew, Process, Task
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.process import Process
from crewai.task import Task
ceo = Agent(
role="CEO",

@@ -1,7 +1,8 @@
"""Test Agent creation and execution basic functionality."""
from ..crewai import Agent, Task
from crewai.agent import Agent
from crewai.task import Task
def test_task_tool_reflect_agent_tools():