Refactor Codebase to Use Pydantic v2; Enhance Type Hints and Documentation (#24)

Update to Pydantic v2:

- Transitioned all references from pydantic.v1 to pydantic (v2), ensuring compatibility with the latest Pydantic features and improvements.
- Affected components include agent tools, prompts, crew, and task modules (see the import sketch below).
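
For illustration, the import-level change in the agent module, taken verbatim from the diff below:

```python
# Before: Pydantic v1 compatibility shim
# from pydantic.v1 import BaseModel, Field, PrivateAttr, root_validator

# After: native Pydantic v2, which also exposes the InstanceOf annotation
# and the model_validator decorator used further down in the diff.
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
```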

Refactoring & Alignment with Pydantic Standards:

- Refactored the agent module away from a custom __init__ to align more closely with Pydantic best practices.
- Updated the crew module to Pydantic v2 and enhanced its configuration handling to accept both JSON and dictionary inputs. Some (not all) exceptions were also migrated to Pydantic's error-handling facilities; a sketch of both changes follows.
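
A minimal sketch of the new config handling, condensed from the crew diff below. `CrewSketch` is a hypothetical name, and the plain `str` check is a simplification for clarity (the commit itself annotates `config` with pydantic's `Json` type):

```python
import json
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel, field_validator, model_validator
from pydantic_core import PydanticCustomError


class CrewSketch(BaseModel):
    # Accepts either a JSON string or an already-parsed dict.
    config: Optional[Union[str, Dict[str, Any]]] = None

    @field_validator("config", mode="before")
    @classmethod
    def parse_config(cls, v):
        # Normalize a JSON string into a dict before field validation runs.
        if isinstance(v, str):
            return json.loads(v)
        return v

    @model_validator(mode="after")
    def check_config(self):
        # PydanticCustomError pairs a machine-readable error type with a
        # human-readable message, unlike a bare ValueError.
        if self.config and not self.config.get("agents"):
            raise PydanticCustomError(
                "missing_keys_in_config", "Config should have agents and tasks", {}
            )
        return self


# Both input forms now validate identically:
CrewSketch(config='{"agents": [{"role": "researcher"}]}')
CrewSketch(config={"agents": [{"role": "researcher"}]})
```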

Enhancements to Validators and Typings:

- Improved validators and type annotations across multiple modules, enhancing code readability and maintainability.
- Streamlined the validation process in line with Pydantic v2's methodology (sketched below).
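
The core pattern, shown on a hypothetical `Example` model (the real equivalents are `Agent.check_agent_executor`, `Crew.check_config`, and `Task.check_tools` in the diffs below): a v1 class-level `root_validator` over a raw values dict becomes a v2 `model_validator(mode="after")` that operates on the constructed instance:

```python
from pydantic import BaseModel, model_validator


class Example(BaseModel):
    value: int = 0

    # Pydantic v1 equivalent, for comparison:
    # @root_validator(pre=True)
    # def set_default(cls, values):
    #     if not values.get("value"):
    #         values["value"] = 42
    #     return values

    # Pydantic v2: runs after the model is built; fields are read and
    # mutated through `self`, and the validator returns the instance.
    @model_validator(mode="after")
    def set_default(self) -> "Example":
        if not self.value:
            self.value = 42
        return self
```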

Import and Configuration Adjustments:

- Switched the tests to absolute imports, since Pytest had trouble finding packages through relative imports (example below).
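
The change, as it appears in the test diffs below:

```python
# Before: relative import that Pytest's package discovery could fail to resolve
# from ..crewai import Agent

# After: absolute import against the installed crewai package
from crewai.agent import Agent
```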

Greyson LaLonde authored on 2023-12-29 19:24:30 -05:00, committed by GitHub
parent 3b5515c5c2
commit 5cc230263c
11 changed files with 157 additions and 112 deletions

.gitignore (vendored)

```diff
@@ -3,4 +3,5 @@
 __pycache__
 dist/
 .env
 assets/*
+.idea
```


crewai/agent.py

```diff
@@ -1,5 +1,4 @@
 """Generic agent."""
 from typing import Any, List, Optional
 from langchain.agents import AgentExecutor
@@ -8,14 +7,13 @@ from langchain.agents.output_parsers import ReActSingleInputOutputParser
 from langchain.chat_models import ChatOpenAI
 from langchain.memory import ConversationSummaryMemory
 from langchain.tools.render import render_text_description
-from pydantic.v1 import BaseModel, Field, PrivateAttr, root_validator
+from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
 from .prompts import Prompts
 class Agent(BaseModel):
-    """
-    Represents an agent in a system.
+    """Represents an agent in a system.
     Each agent has a role, a goal, a backstory, and an optional language model (llm).
     The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
@@ -31,31 +29,45 @@ class Agent(BaseModel):
         allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
     """
-    agent_executor: AgentExecutor = None
+    agent_executor: Optional[InstanceOf[AgentExecutor]] = Field(
+        default=None, description="An instance of the AgentExecutor class."
+    )
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Objective of the agent")
     backstory: str = Field(description="Backstory of the agent")
-    llm: Optional[Any] = Field(description="LLM that will run the agent")
+    llm: Optional[Any] = Field(
+        default_factory=lambda: ChatOpenAI(
+            temperature=0.7,
+            model_name="gpt-4",
+        ),
+        description="Language model that will run the agent.",
+    )
     memory: bool = Field(
-        description="Whether the agent should have memory or not", default=True
+        default=True, description="Whether the agent should have memory or not"
     )
     verbose: bool = Field(
-        description="Verbose mode for the Agent Execution", default=False
+        default=False, description="Verbose mode for the Agent Execution"
     )
     allow_delegation: bool = Field(
-        description="Allow delegation of tasks to agents", default=True
+        default=True, description="Allow delegation of tasks to agents"
     )
-    tools: List[Any] = Field(description="Tools at agents disposal", default=[])
+    tools: List[Any] = Field(
+        default_factory=list, description="Tools at agents disposal"
+    )
     _task_calls: List[Any] = PrivateAttr()
-    @root_validator(pre=True)
-    def check_llm(_cls, values):
-        if not values.get("llm"):
-            values["llm"] = ChatOpenAI(temperature=0.7, model_name="gpt-4")
-        return values
+    @model_validator(mode="after")
+    def check_agent_executor(self) -> "Agent":
+        if not self.agent_executor:
+            self.agent_executor = self._create_agent_executor()
+        return self
-    def __init__(self, **data):
-        super().__init__(**data)
+    def _create_agent_executor(self) -> AgentExecutor:
+        """Create an agent executor for the agent.
+        Returns:
+            An instance of the AgentExecutor class.
+        """
         agent_args = {
             "input": lambda x: x["input"],
             "tools": lambda x: x["tools"],
@@ -89,17 +101,20 @@ class Agent(BaseModel):
             agent_args | execution_prompt | bind | ReActSingleInputOutputParser()
         )
-        self.agent_executor = AgentExecutor(agent=inner_agent, **executor_args)
+        return AgentExecutor(agent=inner_agent, **executor_args)
     def execute_task(
         self, task: str, context: str = None, tools: List[Any] = None
     ) -> str:
-        """
-        Execute a task with the agent.
-        Parameters:
-            task (str): Task to execute
-        Returns:
-            output (str): Output of the agent
+        """Execute a task with the agent.
+        Args:
+            task: Task to execute.
+            context: Context to execute the task in.
+            tools: Tools to use for the task.
+        Returns:
+            Output of the agent
         """
         if context:
             task = "\n".join(
@@ -116,5 +131,6 @@ class Agent(BaseModel):
             }
         )["output"]
-    def __tools_names(self, tools) -> str:
+    @staticmethod
+    def __tools_names(tools) -> str:
         return ", ".join([t.name for t in tools])
```


crewai/crew.py

```diff
@@ -1,7 +1,8 @@
 import json
-from typing import List, Optional
-from pydantic.v1 import BaseModel, Field, Json, root_validator
+from typing import Any, Dict, List, Optional, Union
+from pydantic import BaseModel, Field, Json, field_validator, model_validator
+from pydantic_core import PydanticCustomError
 from .agent import Agent
 from .process import Process
@@ -10,62 +11,69 @@ from .tools.agent_tools import AgentTools
 class Crew(BaseModel):
-    """
-    Class that represents a group of agents, how they should work together and
-    their tasks.
-    """
+    """Class that represents a group of agents, how they should work together and their tasks."""
-    tasks: Optional[List[Task]] = Field(description="List of tasks")
-    agents: Optional[List[Agent]] = Field(description="List of agents in this crew.")
+    tasks: List[Task] = Field(description="List of tasks", default_factory=list)
+    agents: List[Agent] = Field(
+        description="List of agents in this crew.", default_factory=list
+    )
     process: Process = Field(
         description="Process that the crew will follow.", default=Process.sequential
     )
     verbose: bool = Field(
         description="Verbose mode for the Agent Execution", default=False
     )
-    config: Optional[Json] = Field(
+    config: Optional[Union[Json, Dict[str, Any]]] = Field(
         description="Configuration of the crew.", default=None
     )
-    @root_validator(pre=True)
-    def check_config(_cls, values):
-        if not values.get("config") and (
-            not values.get("agents") and not values.get("tasks")
-        ):
-            raise ValueError("Either agents and task need to be set or config.")
-        if values.get("config"):
-            config = json.loads(values.get("config"))
-            if not config.get("agents") or not config.get("tasks"):
-                raise ValueError("Config should have agents and tasks.")
-            values["agents"] = [Agent(**agent) for agent in config["agents"]]
+    @classmethod
+    @field_validator("config", mode="before")
+    def check_config_type(cls, v: Union[Json, Dict[str, Any]]):
+        if isinstance(v, Json):
+            return json.loads(v)
+        return v
+    @model_validator(mode="after")
+    def check_config(self):
+        if not self.config and not self.tasks and not self.agents:
+            raise PydanticCustomError(
+                "missing_keys", "Either agents and task need to be set or config.", {}
+            )
+        if self.config:
+            if not self.config.get("agents") or not self.config.get("tasks"):
+                raise PydanticCustomError(
+                    "missing_keys_in_config", "Config should have agents and tasks", {}
+                )
+            self.agents = [Agent(**agent) for agent in self.config["agents"]]
             tasks = []
-            for task in config["tasks"]:
-                task_agent = [
-                    agt for agt in values["agents"] if agt.role == task["agent"]
-                ][0]
+            for task in self.config["tasks"]:
+                task_agent = [agt for agt in self.agents if agt.role == task["agent"]][
+                    0
+                ]
                 del task["agent"]
                 tasks.append(Task(**task, agent=task_agent))
-            values["tasks"] = tasks
-        return values
+            self.tasks = tasks
+        return self
     def kickoff(self) -> str:
-        """
-        Kickoff the crew to work on it's tasks.
+        """Kickoff the crew to work on its tasks.
         Returns:
-            output (List[str]): Output of the crew for each task.
+            Output of the crew for each task.
         """
         if self.process == Process.sequential:
             return self.__sequential_loop()
     def __sequential_loop(self) -> str:
-        """
-        Loop that executes the sequential process.
+        """Loop that executes the sequential process.
         Returns:
-            output (str): Output of the crew.
+            Output of the crew.
         """
         task_outcome = None
         for task in self.tasks:
```


crewai/prompts.py

```diff
@@ -4,7 +4,7 @@ from textwrap import dedent
 from typing import ClassVar
 from langchain.prompts import PromptTemplate
-from pydantic.v1 import BaseModel
+from pydantic import BaseModel
 class Prompts(BaseModel):
```


crewai/task.py

```diff
@@ -1,35 +1,33 @@
-from typing import List, Optional
-from langchain.tools import Tool
-from pydantic.v1 import BaseModel, Field, root_validator
+from typing import Any, List, Optional
+from pydantic import BaseModel, Field, model_validator
 from .agent import Agent
 class Task(BaseModel):
-    """
-    Class that represent a task to be executed.
-    """
+    """Class that represent a task to be executed."""
     description: str = Field(description="Description of the actual task.")
     agent: Optional[Agent] = Field(
         description="Agent responsible for the task.", default=None
     )
-    tools: Optional[List[Tool]] = Field(
-        description="Tools the agent are limited to use for this task.", default=[]
+    tools: List[Any] = Field(
+        default_factory=list,
+        description="Tools the agent are limited to use for this task.",
     )
-    @root_validator(pre=False)
-    def _set_tools(_cls, values):
-        if (values.get("agent")) and not (values.get("tools")):
-            values["tools"] = values.get("agent").tools
-        return values
+    @model_validator(mode="after")
+    def check_tools(self):
+        if not self.tools and (self.agent and self.agent.tools):
+            self.tools.extend(self.agent.tools)
+        return self
     def execute(self, context: str = None) -> str:
-        """
-        Execute the task.
+        """Execute the task.
         Returns:
-            output (str): Output of the task.
+            Output of the task.
         """
         if self.agent:
             return self.agent.execute_task(
```


crewai/tools/agent_tools.py

```diff
@@ -2,7 +2,7 @@ from textwrap import dedent
 from typing import List
 from langchain.tools import Tool
-from pydantic.v1 import BaseModel, Field
+from pydantic import BaseModel, Field
 from ..agent import Agent
```


agent tests

```diff
@@ -3,7 +3,7 @@
 import pytest
 from langchain.chat_models import ChatOpenAI as OpenAI
-from ..crewai import Agent
+from crewai.agent import Agent
 def test_agent_creation():
```


agent tools tests

```diff
@@ -1,56 +1,74 @@
 """Test Agent creation and execution basic functionality."""
 import pytest
-from ...crewai import Agent
-from ...crewai.tools.agent_tools import AgentTools
+from crewai.agent import Agent
+from crewai.tools.agent_tools import AgentTools
 researcher = Agent(
     role="researcher",
     goal="make the best research and analysis on content about AI and AI agents",
     backstory="You're an expert researcher, specialized in technology",
-    allow_delegation=False
+    allow_delegation=False,
 )
 tools = AgentTools(agents=[researcher])
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_delegate_work():
     result = tools.delegate_work(
         command="researcher|share your take on AI Agents|I heard you hate them"
     )
-    assert result == "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
+    assert (
+        result
+        == "I apologize if my previous statements have given you the impression that I hate AI agents. As a technology researcher, I don't hold personal sentiments towards AI or any other technology. Rather, I analyze them objectively based on their capabilities, applications, and implications. AI agents, in particular, are a fascinating domain of research. They hold tremendous potential in automating and optimizing various tasks across industries. However, like any other technology, they come with their own set of challenges, such as ethical considerations around privacy and decision-making. My objective is to understand these technologies in depth and provide a balanced view."
+    )
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_ask_question():
     result = tools.ask_question(
         command="researcher|do you hate AI Agents?|I heard you LOVE them"
     )
-    assert result == "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
+    assert (
+        result
+        == "As an AI, I don't possess feelings or emotions, so I don't love or hate anything. However, I can provide detailed analysis and research on AI agents. They are a fascinating field of study with the potential to revolutionize many industries, although they also present certain challenges and ethical considerations."
+    )
 def test_can_not_self_delegate():
     # TODO: Add test for self delegation
     pass
 def test_delegate_work_with_wrong_input():
-    result = tools.ask_question(
-        command="writer|share your take on AI Agents"
-    )
-    assert result == "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`."
+    result = tools.ask_question(command="writer|share your take on AI Agents")
+    assert (
+        result
+        == "Error executing tool. Missing exact 3 pipe (|) separated values. For example, `coworker|task|information`."
+    )
 def test_delegate_work_to_wrong_agent():
     result = tools.ask_question(
         command="writer|share your take on AI Agents|I heard you hate them"
     )
-    assert result == "Error executing tool. Co-worker not found, double check the co-worker."
+    assert (
+        result
+        == "Error executing tool. Co-worker not found, double check the co-worker."
+    )
 def test_ask_question_to_wrong_agent():
     result = tools.ask_question(
         command="writer|do you hate AI Agents?|I heard you LOVE them"
     )
-    assert result == "Error executing tool. Co-worker not found, double check the co-worker."
+    assert (
+        result
+        == "Error executing tool. Co-worker not found, double check the co-worker."
+    )
```


crew tests

```diff
@@ -4,7 +4,10 @@ import json
 import pytest
-from ..crewai import Agent, Crew, Process, Task
+from crewai.agent import Agent
+from crewai.crew import Crew
+from crewai.process import Process
+from crewai.task import Task
 ceo = Agent(
     role="CEO",
```


task tests

```diff
@@ -1,7 +1,8 @@
 """Test Agent creation and execution basic functionality."""
-from ..crewai import Agent, Task
+from crewai.agent import Agent
+from crewai.task import Task
 def test_task_tool_reflect_agent_tools():
```