adding new converter logic

João Moura
2024-02-22 15:16:17 -03:00
parent e397a49c23
commit 1c7f9826b4
15 changed files with 6110 additions and 24065 deletions


@@ -241,7 +241,7 @@ def test_agent_custom_max_iterations():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_agent_repeated_tool_usage(capsys):
     @tool
-    def get_final_answer(numbers) -> float:
+    def get_final_answer(anything: str) -> float:
         """Get the final answer but don't give it yet, just re-use this
         tool non-stop."""
         return 42
@@ -251,6 +251,7 @@ def test_agent_repeated_tool_usage(capsys):
         goal="test goal",
         backstory="test backstory",
         max_iter=4,
+        llm=ChatOpenAI(model="gpt-4-0125-preview"),
         allow_delegation=False,
         verbose=True,
     )
@@ -267,10 +268,7 @@ def test_agent_repeated_tool_usage(capsys):
     captured = capsys.readouterr()
-    assert (
-        "I have been instructed to give the final answer now, so I will proceed to do so using the exact expected format."
-        in captured.out
-    )
+    assert "Final Answer: 42" in captured.out

 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -551,7 +549,7 @@ def test_agent_step_callback():
 def test_agent_function_calling_llm():
     from langchain_openai import ChatOpenAI
-    llm = ChatOpenAI(model="gpt-3.5")
+    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
     with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:
@@ -565,6 +563,7 @@ def test_agent_function_calling_llm():
             goal="test goal",
             backstory="test backstory",
             tools=[learn_about_AI],
+            llm=ChatOpenAI(model="gpt-4-0125-preview"),
             function_calling_llm=llm,
         )
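
Taken together, the hunks in this file pin explicit, versioned OpenAI models on the test agents instead of relying on a default model. A minimal sketch of the resulting setup, assuming the crewai Agent API these tests exercise (the tool body and model names are copied from the diff; the rest is illustrative):

    from crewai import Agent
    from langchain.tools import tool
    from langchain_openai import ChatOpenAI

    @tool
    def get_final_answer(anything: str) -> float:
        """Get the final answer but don't give it yet, just re-use this
        tool non-stop."""
        return 42

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        tools=[get_final_answer],
        max_iter=4,  # caps the loop the tool deliberately invites
        llm=ChatOpenAI(model="gpt-4-0125-preview"),  # pinned agent model
        allow_delegation=False,
        verbose=True,
    )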

File diffs suppressed because they are too large (5 files)


@@ -299,9 +299,10 @@ def test_api_calls_throttling(capsys):
     from unittest.mock import patch
     from langchain.tools import tool
+    from langchain_openai import ChatOpenAI
     @tool
-    def get_final_answer(numbers) -> float:
+    def get_final_answer(anything) -> float:
         """Get the final answer but don't give it yet, just re-use this
         tool non-stop."""
         return 42
@@ -313,6 +314,7 @@ def test_api_calls_throttling(capsys):
         max_iter=5,
         allow_delegation=False,
         verbose=True,
+        llm=ChatOpenAI(model="gpt-4-0125-preview"),
     )
     task = Task(
@@ -501,7 +503,7 @@ def test_crew_function_calling_llm():
     from langchain.tools import tool
     from langchain_openai import ChatOpenAI
-    llm = ChatOpenAI(model="gpt-3.5")
+    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
     with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:
@@ -514,6 +516,7 @@ def test_crew_function_calling_llm():
             role="test role",
             goal="test goal",
             backstory="test backstory",
+            llm=ChatOpenAI(model="gpt-4-0125-preview"),
             tools=[learn_about_AI],
         )
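
Both function_calling_llm tests rely on the same wrap-and-assert trick: patch.object with wraps= keeps the client's real create method working while recording every call, letting the test confirm that the dedicated function-calling LLM (rather than the agent's main LLM) actually handled the tool invocation. A condensed sketch of that pattern, with the crew run elided and the final assertion being one plausible check rather than the test's exact one:

    from unittest.mock import patch
    from langchain_openai import ChatOpenAI

    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
    with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:
        ...  # kick off the agent/crew with function_calling_llm=llm here
        private_mock.assert_called()  # tool calls were routed through this client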


@@ -204,6 +204,8 @@ def test_output_json():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_output_pydantic_to_another_task():
+    from langchain_openai import ChatOpenAI
+
     class ScoreOutput(BaseModel):
         score: int
@@ -212,6 +214,9 @@ def test_output_pydantic_to_another_task():
         goal="Score the title",
         backstory="You're an expert scorer, specialized in scoring titles.",
         allow_delegation=False,
+        llm=ChatOpenAI(model="gpt-4-0125-preview"),
+        function_calling_llm=ChatOpenAI(model="gpt-3.5-turbo-0125"),
+        verbose=True,
     )
     task1 = Task(
@@ -222,15 +227,15 @@ def test_output_pydantic_to_another_task():
     )
     task2 = Task(
-        description="Given the score the title 'The impact of AI in the future of work' got, give me an integer score between 1-5 for the following title: 'Return of the Jedi'",
+        description="Given the score the title 'The impact of AI in the future of work' got, give me an integer score between 1-5 for the following title: 'Return of the Jedi', you MUST give it a score, use your best judgment",
         expected_output="The score of the title.",
         output_pydantic=ScoreOutput,
         agent=scorer,
     )
-    crew = Crew(agents=[scorer], tasks=[task1, task2])
+    crew = Crew(agents=[scorer], tasks=[task1, task2], verbose=2)
     result = crew.kickoff()
-    assert 4 == result.score
+    assert 5 == result.score

 @pytest.mark.vcr(filter_headers=["authorization"])
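
This last file also documents the flow under test end to end: task1 emits a ScoreOutput through output_pydantic, task2 consumes that score as context, and crew.kickoff() returns the final task's pydantic object, which the updated assertion checks. A trimmed sketch of that flow, assuming the same Agent/Task/Crew signatures shown above (task descriptions shortened for brevity):

    from crewai import Agent, Crew, Task
    from langchain_openai import ChatOpenAI
    from pydantic import BaseModel

    class ScoreOutput(BaseModel):
        score: int

    scorer = Agent(
        role="Scorer",
        goal="Score the title",
        backstory="You're an expert scorer, specialized in scoring titles.",
        llm=ChatOpenAI(model="gpt-4-0125-preview"),
        allow_delegation=False,
    )

    task1 = Task(
        description="Give an integer score between 1-5 for the title 'The impact of AI in the future of work'",
        expected_output="The score of the title.",
        output_pydantic=ScoreOutput,
        agent=scorer,
    )
    task2 = Task(
        description="Given the previous score, give an integer score between 1-5 for 'Return of the Jedi'; you MUST give it a score",
        expected_output="The score of the title.",
        output_pydantic=ScoreOutput,
        agent=scorer,
    )

    crew = Crew(agents=[scorer], tasks=[task1, task2], verbose=2)
    result = crew.kickoff()  # the final task's ScoreOutput instance
    print(result.score)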