Performed spell check across the rest of the code base, and enhanced the YAML parser code a little (#895)

* Performed spell check across the entire documentation

Thank you once again!

* Performed spell check across most of the code base
Folders checked:
- agents
- cli
- memory
- project
- tasks
- telemetry
- tools
- translations

* Trying to add a max_token setting for the agents, so they are limited by the number of tokens.
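
A minimal sketch of the idea behind that change, assuming the agent's underlying model is a LangChain `ChatOpenAI` instance passed through the `llm` argument; the `max_tokens` cap shown here is the standard LangChain parameter, not the field this commit wires up:

```python
from crewai import Agent
from langchain_openai import ChatOpenAI

# Cap how many tokens the model may generate per call (assumed approach;
# the commit's own max_token wiring may differ).
limited_llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=256)

agent = Agent(
    role="Researcher",
    goal="Summarize findings briefly",
    backstory="An expert analyst who keeps answers short.",
    llm=limited_llm,
    allow_delegation=False,
)
```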

* Performed spell check across the rest of the code base, and enhanced the YAML parser code a little

* Small change in the main agent doc

* Improve _save_file method to handle both dict and str inputs (a sketch follows after the list below)

- Add check for dict type input
- Use json.dump for dict serialization
- Convert non-dict inputs to string
- Remove type ignore comments
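
A minimal sketch of what the updated method might look like, assuming it lives on `Task` and writes to an `output_file` attribute (the names here are illustrative, not taken from the diff):

```python
import json

def _save_file(self, result) -> None:
    """Write a task result to self.output_file (sketch)."""
    with open(self.output_file, "w", encoding="utf-8") as file:
        if isinstance(result, dict):
            # Dict results are serialized as JSON
            json.dump(result, file, ensure_ascii=False, indent=2)
        else:
            # Anything else is written as its string representation
            file.write(str(result))
```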

---------

Co-authored-by: João Moura <joaomdmoura@gmail.com>
Authored by: Taleb
Date: 2024-07-28 21:39:54 +03:00
Committed by: GitHub
Parent: 218b17f70f
Commit: ca9deaebb7
9 changed files with 78 additions and 40 deletions

View File

@@ -397,7 +397,7 @@ def test_agent_moved_on_after_max_iterations():
)
task = Task(
description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool over and over until you're told you can give yout final answer.",
description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool over and over until you're told you can give your final answer.",
expected_output="The final answer",
)
output = agent.execute_task(
@@ -948,7 +948,7 @@ def test_agent_use_trained_data(crew_training_handler):
crew_training_handler().load.return_value = {
agent.role: {
"suggestions": [
"The result of the math operatio must be right.",
"The result of the math operation must be right.",
"Result must be better than 1.",
]
}
@@ -958,7 +958,7 @@ def test_agent_use_trained_data(crew_training_handler):
assert (
result == "What is 1 + 1?You MUST follow these feedbacks: \n "
"The result of the math operatio must be right.\n - Result must be better than 1."
"The result of the math operation must be right.\n - Result must be better than 1."
)
crew_training_handler.assert_has_calls(
[mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()]

View File

@@ -69,7 +69,7 @@ def test_crew_config_conditional_requirement():
"agent": "Senior Researcher",
},
{
"description": "Write a 1 amazing paragraph highlight for each idead that showcases how good an article about this topic could be, check references if necessary or search for more content but make sure it's unique, interesting and well written. Return the list of ideas with their paragraph and your notes.",
"description": "Write a 1 amazing paragraph highlight for each idea that showcases how good an article about this topic could be, check references if necessary or search for more content but make sure it's unique, interesting and well written. Return the list of ideas with their paragraph and your notes.",
"expected_output": "A 4 paragraph article about AI.",
"agent": "Senior Writer",
},
@@ -572,6 +572,47 @@ def test_api_calls_throttling(capsys):
moveon.assert_called()
# This test is not consistent, some issue is happening on the CI when it comes to Prompt tokens
# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 0, 'successful_requests': 2, 'total_tokens': 34}} CI OUTPUT
# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 314, 'successful_requests': 2, 'total_tokens': 348}}
# The issue might be related to the calculate_usage_metrics function
# @pytest.mark.vcr(filter_headers=["authorization"])
# def test_crew_full_output():
# agent = Agent(
# role="test role",
# goal="test goal",
# backstory="test backstory",
# allow_delegation=False,
# verbose=True,
# )
# task1 = Task(
# description="just say hi!",
# expected_output="your greeting",
# agent=agent,
# )
# task2 = Task(
# description="just say hello!",
# expected_output="your greeting",
# agent=agent,
# )
# crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)
# result = crew.kickoff()
# assert result == {
# "final_output": "Hello!",
# "tasks_outputs": [task1.output, task2.output],
# "usage_metrics": {
# "total_tokens": 348,
# "prompt_tokens": 314,
# "completion_tokens": 34,
# "successful_requests": 2,
# },
# }
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_kickoff_usage_metrics():
inputs = [
@@ -1192,7 +1233,7 @@ def test_task_with_no_arguments():
)
task = Task(
description="Look at the available data nd give me a sense on the total number of sales.",
description="Look at the available data and give me a sense on the total number of sales.",
expected_output="The total number of sales as an integer",
agent=researcher,
)
@@ -1239,7 +1280,7 @@ def test_delegation_is_not_enabled_if_there_are_only_one_agent():
)
task = Task(
description="Look at the available data nd give me a sense on the total number of sales.",
description="Look at the available data and give me a sense on the total number of sales.",
expected_output="The total number of sales as an integer",
agent=researcher,
)
@@ -1602,16 +1643,16 @@ def test_tools_with_custom_caching():
writer1 = Agent(
role="Writer",
goal="You write lesssons of math for kids.",
backstory="You're an expert in writting and you love to teach kids but you know nothing of math.",
goal="You write lessons of math for kids.",
backstory="You're an expert in writing and you love to teach kids but you know nothing of math.",
tools=[multiplcation_tool],
allow_delegation=False,
)
writer2 = Agent(
role="Writer",
goal="You write lesssons of math for kids.",
backstory="You're an expert in writting and you love to teach kids but you know nothing of math.",
goal="You write lessons of math for kids.",
backstory="You're an expert in writing and you love to teach kids but you know nothing of math.",
tools=[multiplcation_tool],
allow_delegation=False,
)

View File

@@ -109,7 +109,7 @@ def test_task_callback():
task_completed.assert_called_once_with(task.output)
def test_task_callback_returns_task_ouput():
def test_task_callback_returns_task_output():
from crewai.tasks.output_format import OutputFormat
researcher = Agent(