Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 16:18:30 +00:00

updating code to use pydantic v1
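The change is mostly mechanical: every `from pydantic import ...` in the hunks below becomes `from pydantic.v1 import ...`, keeping v1-style model semantics (which LangChain still expected at the time) on a pydantic 2.x install. A minimal sketch of the pattern, assuming pydantic 2.x is installed; ExampleAgent is illustrative and not part of the repository:

# Illustrative only: pydantic 2.x re-exports the old API under `pydantic.v1`,
# which is the namespace this commit switches every model to.
from pydantic.v1 import BaseModel, Field


class ExampleAgent(BaseModel):
    """Toy v1-style model, mirroring how the Agent fields below are declared."""

    role: str = Field(description="Role the agent plays")
    tools: list = Field(description="Tools at the agent's disposal", default=[])


agent = ExampleAgent(role="test role")
print(agent.dict())  # .dict() is the v1-era serialization helper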
@@ -1,7 +1,7 @@
"""Generic agent."""

from typing import List
-from pydantic import BaseModel, Field
+from pydantic.v1 import BaseModel, Field

from langchain.tools import Tool
from langchain.agents import AgentExecutor
@@ -16,7 +16,6 @@ from langchain.memory import (
)

from .prompts import Prompts
from .agents.agent_vote import AgentVote

class Agent(BaseModel):
    """Generic agent implementation."""
@@ -28,11 +27,7 @@ class Agent(BaseModel):
        description="Tools at agents disposal",
        default=[]
    )
    prompts: Prompts = Field(
        description="Prompts class for the agent.",
        default=Prompts
    )
-    llm: str = Field(
+    llm: OpenAI = Field(
        description="LLM that will run the agent",
        default=OpenAI(
            temperature=0.7,
@@ -1,4 +1,4 @@
-from pydantic import BaseModel, Field
+from pydantic.v1 import BaseModel, Field

class AgentVote(BaseModel):
    task: str = Field(description="Task to be executed by the agent")
@@ -1,5 +1,5 @@
from typing import List
-from pydantic import BaseModel, Field
+from pydantic.v1 import BaseModel, Field

from .process import Process
from .agent import Agent
@@ -21,4 +21,19 @@ class Crew(BaseModel):
        Returns:
            output (List[str]): Output of the crew for each task.
        """
        # if self.process == Process.consensual:

        return "Crew is executing task"

    def __consensual_loop(self) -> str:
        """
        Loop that executes the consensual process.

        Returns:
            output (str): Output of the crew.
        """

        # The group of agents need to decide which agent will execute each task
        # in the self.task list. This is done by a voting process between all the
        # agents in self.agents. The agent with the most votes will execute the
        # task.
        pass
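The `__consensual_loop` body above is still a stub; only its trailing comment describes the intended voting behavior. A minimal illustration of that idea, assuming a hypothetical `vote_for_task` helper on each agent (nothing like it exists in this commit) plus the `execute_task` method exercised in the agent tests further down:

from collections import Counter


def consensual_loop(agents, tasks):
    """Illustrative only: each agent votes on who should run a task, the
    most-voted agent executes it, and the outputs are collected."""
    outputs = []
    for task in tasks:
        # Hypothetical: every agent returns the role it votes for.
        votes = Counter(agent.vote_for_task(task) for agent in agents)
        winning_role, _ = votes.most_common(1)[0]
        winner = next(agent for agent in agents if agent.role == winning_role)
        outputs.append(winner.execute_task(task))
    return outputs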
@@ -2,7 +2,7 @@

from textwrap import dedent
from typing import ClassVar
-from pydantic import BaseModel
+from pydantic.v1 import BaseModel
from langchain.prompts import PromptTemplate

class Prompts(BaseModel):
@@ -1,5 +1,6 @@
from typing import List, Optional
-from pydantic import BaseModel, Field, model_validator
+from pydantic.v1 import BaseModel, Field
+from pydantic import model_validator

from langchain.tools import Tool
@@ -30,6 +30,21 @@ def test_agent_default_value():
    assert agent.llm.temperature == 0.7
    assert agent.llm.verbose == True

def test_custom_llm():
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        llm=OpenAI(
            temperature=0,
            model="gpt-3.5"
        )
    )

    assert isinstance(agent.llm, OpenAI)
    assert agent.llm.model_name == "gpt-3.5"
    assert agent.llm.temperature == 0

@pytest.mark.vcr()
def test_agent_execution():
    agent = Agent(
@@ -38,5 +53,5 @@ def test_agent_execution():
        backstory="test backstory"
    )

-    output = agent.execute("How much is 1 + 1?")
-    assert output == "2"
+    output = agent.execute_task("How much is 1 + 1?")
+    assert output == "1 + 1 equals 2."
@@ -1,15 +1,29 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
main goal is: test goal\n\nTOOLS:\n------\n\nYou have access to the following
tools:\n\n\n\nTo use a tool, please use the following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the action to take, should be one of []\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]\n```\n\nBegin!\n\nCurrent Task: How much is 1 + 1?\n\n"}],
"model": "gpt-4", "max_tokens": null, "stream": false, "n": 1, "temperature":
0.7, "stop": ["\nObservation"]}'
body: '{"messages": [{"role": "user", "content": "You are an AI assistant reading
the transcript of a conversation between an AI and a human. Extract all of the
proper nouns from the last line of conversation. As a guideline, a proper noun
is generally capitalized. You should definitely extract all names and places.\n\nThe
conversation history is provided just in case of a coreference (e.g. \"What
do you know about him\" where \"him\" is defined in a previous line) -- ignore
items mentioned there that are not in the last line.\n\nReturn the output as
a single comma-separated list, or NONE if there is nothing of note to return
(e.g. the user is just issuing a greeting or having a simple conversation).\n\nEXAMPLE\nConversation
history:\nPerson #1: how''s it going today?\nAI: \"It''s going great! How about
you?\"\nPerson #1: good! busy working on Langchain. lots to do.\nAI: \"That
sounds like a lot of work! What kind of things are you doing to make Langchain
better?\"\nLast line:\nPerson #1: i''m trying to improve Langchain''s interfaces,
the UX, its integrations with various products the user might want ... a lot
of stuff.\nOutput: Langchain\nEND OF EXAMPLE\n\nEXAMPLE\nConversation history:\nPerson
#1: how''s it going today?\nAI: \"It''s going great! How about you?\"\nPerson
#1: good! busy working on Langchain. lots to do.\nAI: \"That sounds like a lot
of work! What kind of things are you doing to make Langchain better?\"\nLast
line:\nPerson #1: i''m trying to improve Langchain''s interfaces, the UX, its
integrations with various products the user might want ... a lot of stuff. I''m
working with Person #2.\nOutput: Langchain, Person #2\nEND OF EXAMPLE\n\nConversation
history (for reference only):\n\nLast line of conversation (for extraction):\nHuman:
How much is 1 + 1?\n\nOutput:"}], "model": "gpt-4", "max_tokens": null, "stream":
false, "n": 1, "temperature": 0.7}'
headers:
Accept:
- '*/*'
@@ -20,7 +34,7 @@ interactions:
Connection:
- keep-alive
Content-Length:
- '758'
- '1879'
Content-Type:
- application/json
User-Agent:
@@ -35,17 +49,16 @@ interactions:
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQS0/DMBCE7/kVqz23KGmbkOaCgCoSHCohcQNUGWdJXBxviLfipf535PTJxYf5
dsaz+xsBoKmwANSNEt12dpyXORuh2U/9vUiXDzflVOLSre8/ktssx1Fw8OuatBxcF5rbzpIYdjus
e1JCITXJ5nmWzNN4NoCWK7LBVncyno3jLJnuHQ0bTR4LeIoAAH6HN3RzFX1hAfHooLTkvaoJi+MQ
APZsg4LKe+NFOcHRCWp2Qm6o+9jwpm6kgAXDHTiiCoRh4wkUCLO9giU/u9I4ZeHa+U/qC5jgPmp7
7PBmnPHNqifl2YVcL9ztxrYRwMuw0+ZfTex6bjtZCb+TC4smabrLw9P5zujlHgqLsuf6JAqfbKM/
AAAA//8DAD/1vhO7AQAA
H4sIAAAAAAAAA1SQT2uDQBDF736KYc5a1ASN3lootJcU+gdCSwmbdRo36q64k9IS/O5l1cT0Mof5
vffmMScPAFWBOaAsBcumrYPVAz0Wv693t1VJz2/lJtm9yPj4Hm8Wh+8KfecwuwNJPrtupGnamlgZ
PWLZkWByqVGSZXGURmk2gMYUVDvbvuVgGYRJtJgcpVGSLObw4QEAnIbpuumCfjCH0D9vGrJW7Anz
iwgAO1O7DQprlWWhGf0ZSqOZ9FB3/bS+x4n0l8gvpZUttx0Ja7STWTbtKOs9gM+h4vHfVWw707S8
ZVORdr2XYTrm4fyNmUYTY8OivjatPHej9/4AAAD//wMAIl8yO4kBAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 81def25b4addf8f1-NVT
- 8217f2097fed01af-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
@@ -55,7 +68,7 @@ interactions:
Content-Type:
- application/json
Date:
- Sun, 29 Oct 2023 22:45:05 GMT
- Sun, 05 Nov 2023 20:46:20 GMT
Server:
- cloudflare
Transfer-Encoding:
@@ -69,7 +82,7 @@ interactions:
openai-organization:
- clearbit-2
openai-processing-ms:
- '950'
- '847'
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -81,13 +94,200 @@ interactions:
x-ratelimit-remaining-requests:
- '99'
x-ratelimit-remaining-tokens:
- '149840'
- '149557'
x-ratelimit-reset-requests:
- 600ms
x-ratelimit-reset-tokens:
- 64ms
- 177ms
x-request-id:
- b6484701bea444a976e49f353baf7be5
- c0e542a54029d73256f7615e6edc08e7
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "You are test role.\ntest backstory\n\nYour
main goal is: test goal\nTOOLS:\n------\n\nYou have access to the following
tools:\n\n\n\nTo use a tool, please use the following format:\n\n```\nThought:
Do I need to use a tool? Yes\nAction: the action to take, should be one of []\nAction
Input: the input to the action\nObservation: the result of the action\n```\n\nWhen
you have a response for your task, or if you do not need to use a tool, you
MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer:
[your response here]\n```\n\t\tThis is the summary of your work so far:\n \n\n This
is your understanding of the current situation:\n {}\nBegin!\n\nCurrent
Task: How much is 1 + 1?\n\n"}], "model": "gpt-4", "max_tokens": null, "stream":
false, "n": 1, "temperature": 0.7, "stop": ["\nObservation"]}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Authorization:
- Bearer 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
Connection:
- keep-alive
Content-Length:
- '886'
Content-Type:
- application/json
User-Agent:
- OpenAI/v1 PythonBindings/0.28.1
X-OpenAI-Client-User-Agent:
- '{"bindings_version": "0.28.1", "httplib": "requests", "lang": "python", "lang_version":
"3.9.7", "platform": "macOS-10.16-x86_64-i386-64bit", "publisher": "openai",
"uname": "Darwin 22.4.0 Darwin Kernel Version 22.4.0: Mon Mar  6 21:00:17
PST 2023; root:xnu-8796.101.5~3/RELEASE_X86_64 x86_64"}'
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SQQU/CQBCF7/0Vk70KpFsQaC+GhKhcPBiMJmrIuh3ble1O7U6jgfS/my0IetnD
++a9ebP7CECYXGQgdKlYV7Udzm9xhY87Xj3QzqbLuNzePNH6fny5MKjFIDjo7QM1/7pGmqraIhty
B6wbVIwhVU7TNJEzOY97UFGONtiKmoeTYTyV46OjJKPRiwyeIwCAff+Gbi7Hb5FB7++VCr1XBYrs
NAQgGrJBEcp741k5FoMz1OQYXV93XVJblJzBkmAFDjEHJmg9ggImsldwRy/u2jhlYeH8FzYZSLgA
CfjZKushGYljcHdq9G6c8eWmQeXJhS2eqT6MdRHAa39h+6+0qBuqat4wbdGFs+UsPeSJ82eeaTI5
QiZW9o8ej6OwpIt+AAAA//8DAHOHxGfJAQAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8217f210daec01af-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 05 Nov 2023 20:46:21 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- clearbit-2
openai-processing-ms:
- '1086'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '100'
x-ratelimit-limit-tokens:
- '150000'
x-ratelimit-remaining-requests:
- '99'
x-ratelimit-remaining-tokens:
- '149810'
x-ratelimit-reset-requests:
- 600ms
x-ratelimit-reset-tokens:
- 76ms
x-request-id:
- ebeb778dd7c0273460cdb5e24fdbfe7c
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "user", "content": "Progressively summarize the
lines of conversation provided, adding onto the previous summary returning a
new summary.\n\nEXAMPLE\nCurrent summary:\nThe human asks what the AI thinks
of artificial intelligence. The AI thinks artificial intelligence is a force
for good.\n\nNew lines of conversation:\nHuman: Why do you think artificial
intelligence is a force for good?\nAI: Because artificial intelligence will
help humans reach their full potential.\n\nNew summary:\nThe human asks what
the AI thinks of artificial intelligence. The AI thinks artificial intelligence
is a force for good because it will help humans reach their full potential.\nEND
OF EXAMPLE\n\nCurrent summary:\n\n\nNew lines of conversation:\nHuman: How much
is 1 + 1?\nAI: 1 + 1 equals 2.\n\nNew summary:"}], "model": "gpt-4", "max_tokens":
null, "stream": false, "n": 1, "temperature": 0.7}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Authorization:
- Bearer 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
Connection:
- keep-alive
Content-Length:
- '905'
Content-Type:
- application/json
User-Agent:
- OpenAI/v1 PythonBindings/0.28.1
X-OpenAI-Client-User-Agent:
- '{"bindings_version": "0.28.1", "httplib": "requests", "lang": "python", "lang_version":
"3.9.7", "platform": "macOS-10.16-x86_64-i386-64bit", "publisher": "openai",
"uname": "Darwin 22.4.0 Darwin Kernel Version 22.4.0: Mon Mar  6 21:00:17
PST 2023; root:xnu-8796.101.5~3/RELEASE_X86_64 x86_64"}'
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1RQQW7CMBC85xUrXxsQpiWF3LhUpaIHJDhVFXKdBbvEdshuVBDi75VDgPayh52Z
nZk9JQDCFiIHoY1i7aqyN37F2WaxfDlmfGz2q6NbLQ6H5m1az+v3uUijInx9o+arqq+Dq0pkG/wF
1jUqxnhVZpPJUD7LsWwBFwoso2xbce+pN8jkY6cwwWokkcNHAgBwamfM5gs8iBwG6XXjkEhtUeQ3
EoCoQxk3QhFZYuVZpHdQB8/o27hLg2Aapzwo2hGwQZjOQAHZ2ACcYgP7Bil2SYED/BirzZVXI1XB
F1GnGCQ8gATcN6okGPZFZ3i+Jd1Yb8msa1QUfHQnDtWFdk4APtvmzb8yoqqDq3jNYYc+vkOOsss9
cX/yHR2OOpADq/KPaiyTaHJOfgEAAP//AwADfIQK4QEAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8217f219bdf601af-GRU
Cache-Control:
- no-cache, must-revalidate
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 05 Nov 2023 20:46:23 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
openai-model:
- gpt-4-0613
openai-organization:
- clearbit-2
openai-processing-ms:
- '1196'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '100'
x-ratelimit-limit-tokens:
- '150000'
x-ratelimit-remaining-requests:
- '99'
x-ratelimit-remaining-tokens:
- '149793'
x-ratelimit-reset-requests:
- 600ms
x-ratelimit-reset-tokens:
- 82ms
x-request-id:
- 088729b81730273860e6434d8eb6ed57
status:
code: 200
message: OK
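The cassette above records the raw Authorization header, so the Bearer key appears in the recorded interactions. A minimal configuration sketch, assuming the @pytest.mark.vcr() marker used in the tests comes from pytest-vcr (or the compatible pytest-recording plugin), both of which read a vcr_config fixture; this snippet is illustrative and not part of the commit:

import pytest


@pytest.fixture(scope="module")
def vcr_config():
    # Hypothetical hardening: VCR.py's filter_headers option drops the
    # Authorization header before the request is written to a cassette.
    return {"filter_headers": ["authorization"]}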
tests/crew_test.py (new file, 34 lines)

@@ -0,0 +1,34 @@
"""Test Agent creation and execution basic functionality."""
|
||||
|
||||
import pytest
|
||||
from ..crewai import Agent, Crew, Task, Process
|
||||
|
||||
def test_crew_creation():
|
||||
agent_CTO = Agent(
|
||||
role="CTO",
|
||||
goal="Help your team craft the most amazing apps ever made.",
|
||||
backstory="You're world class CTO that works on the best web consulting agency."
|
||||
)
|
||||
agent_QA = Agent(
|
||||
role="QA Engineer",
|
||||
goal="Make sure ship the best software possible with the highest quality",
|
||||
backstory="You're the best at QA in the whole team, you are known for catching all bugs and advocate for improvements."
|
||||
)
|
||||
agent_Eng = Agent(
|
||||
role="Web Engineer",
|
||||
goal="Build amazing websites by writing high quality html, css and js.",
|
||||
backstory="You're great at vanila JS, CSS and HTMl, you got hired to build amazing website using your skills."
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Build a landing page for a website that sells dog food."
|
||||
)
|
||||
|
||||
crew = Crew(
|
||||
agents=[agent_CTO, agent_Eng, agent_QA],
|
||||
goal="Build amazing landing pages.",
|
||||
tasks=[task],
|
||||
process=Process.consensual
|
||||
)
|
||||
|
||||
assert crew.kickoff() == 'lol'
|
||||