Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 23:58:34 +00:00)
Performed spell check across the rest of the code base, and enhanced the YAML parser code a little (#895)
* Performed spell check across the entire documentation

  Thank you once again!

* Performed spell check across most of the code base

  Folders checked:
  - agents
  - cli
  - memory
  - project
  - tasks
  - telemetry
  - tools
  - translations

* Trying to add max_tokens for the agents, so they are limited by number of tokens.

* Performed spell check across the rest of the code base, and enhanced the YAML parser code a little

* Small change in the main agent doc

* Improve _save_file method to handle both dict and str inputs

  - Add check for dict type input
  - Use json.dump for dict serialization
  - Convert non-dict inputs to string
  - Remove type ignore comments

---------

Co-authored-by: João Moura <joaomdmoura@gmail.com>
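The `_save_file` bullet above describes dict-aware saving; here is a minimal sketch of that behavior, assuming the method lives on a task-like object with an `output_file` attribute (the names are illustrative, not taken from this diff):

import json

def _save_file(self, result) -> None:
    # Dict results are serialized as JSON; everything else is written
    # as a plain string, which removes the need for type-ignore comments.
    with open(self.output_file, "w", encoding="utf-8") as file:
        if isinstance(result, dict):
            json.dump(result, file, indent=2)
        else:
            file.write(str(result))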
@@ -114,7 +114,7 @@ from langchain.agents import load_tools
 langchain_tools = load_tools(["google-serper"], llm=llm)

 agent1 = CustomAgent(
-    role="backstory agent",
+    role="agent role",
     goal="who is {input}?",
     backstory="agent backstory",
     verbose=True,
@@ -127,7 +127,7 @@ task1 = Task(
 )

 agent2 = Agent(
-    role="bio agent",
+    role="agent role",
     goal="summarize the short bio for {input} and if needed do more research",
     backstory="agent backstory",
     verbose=True,
@@ -55,11 +55,6 @@ class Agent(BaseAgent):
             tools: Tools at agents disposal
             step_callback: Callback to be executed after each step of the agent execution.
             callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
             allow_code_execution: Enable code execution for the agent.
             max_retry_limit: Maximum number of retries for an agent to execute a task when an error occurs.
     """

     _times_executed: int = PrivateAttr(default=0)
     max_execution_time: Optional[int] = Field(
         default=None,
         description="Maximum execution time for an agent to execute a task",
@@ -191,20 +186,6 @@ class Agent(BaseAgent):
         else:
             task_prompt = self._use_trained_data(task_prompt=task_prompt)

-        try:
-            result = self.agent_executor.invoke(
-                {
-                    "input": task_prompt,
-                    "tool_names": self.agent_executor.tools_names,
-                    "tools": self.agent_executor.tools_description,
-                }
-            )["output"]
-        except Exception as e:
-            self._times_executed += 1
-            if self._times_executed > self.max_retry_limit:
-                raise e
-            result = self.execute_task(task, context, tools)
-
         if self.max_rpm:
             self._rpm_controller.stop_rpm_counter()
@@ -262,6 +243,7 @@ class Agent(BaseAgent):
             "tools_handler": self.tools_handler,
             "function_calling_llm": self.function_calling_llm,
             "callbacks": self.callbacks,
+            "max_tokens": self.max_tokens,
         }

         if self._rpm_controller:
@@ -45,6 +45,7 @@ class BaseAgent(ABC, BaseModel):
         i18n (I18N): Internationalization settings.
         cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
         tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
+        max_tokens: Maximum number of tokens for the agent to generate in a response.


     Methods:
@@ -118,6 +119,9 @@ class BaseAgent(ABC, BaseModel):
     tools_handler: InstanceOf[ToolsHandler] = Field(
         default=None, description="An instance of the ToolsHandler class."
     )
+    max_tokens: Optional[int] = Field(
+        default=None, description="Maximum number of tokens for the agent's execution."
+    )

     _original_role: str | None = None
     _original_goal: str | None = None
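With the field now on BaseAgent, any agent can set a generation cap; a minimal hypothetical sketch (the argument values are illustrative, not from this diff):

from crewai import Agent

# max_tokens is the new BaseAgent field; the Agent hunk above forwards it
# to the executor via the "max_tokens" entry in the executor arguments.
analyst = Agent(
    role="Analyst",
    goal="Summarize findings in one paragraph.",
    backstory="A concise technical writer.",
    max_tokens=256,  # illustrative cap on generated tokens
)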
@@ -122,7 +122,7 @@ class Task(BaseModel):

     @field_validator("output_file")
     @classmethod
-    def output_file_validattion(cls, value: str) -> str:
+    def output_file_validation(cls, value: str) -> str:
         """Validate the output file path by removing the / from the beginning of the path."""
         if value.startswith("/"):
             return value[1:]
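The renamed validator's rule is easy to mirror in isolation; a hypothetical standalone equivalent for reference:

def strip_leading_slash(value: str) -> str:
    # Same rule as output_file_validation: drop a single leading "/".
    return value[1:] if value.startswith("/") else value

assert strip_leading_slash("/report.md") == "report.md"
assert strip_leading_slash("out/report.md") == "out/report.md"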
@@ -54,12 +54,12 @@ class TaskEvaluator:
     def __init__(self, original_agent):
         self.llm = original_agent.llm

-    def evaluate(self, task, ouput) -> TaskEvaluation:
+    def evaluate(self, task, output) -> TaskEvaluation:
         evaluation_query = (
             f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"
             f"Task Description:\n{task.description}\n\n"
             f"Expected Output:\n{task.expected_output}\n\n"
-            f"Actual Output:\n{ouput}\n\n"
+            f"Actual Output:\n{output}\n\n"
             "Please provide:\n"
             "- Bullet points suggestions to improve future similar tasks\n"
             "- A score from 0 to 10 evaluating on completion, quality, and overall performance"
@@ -1,17 +1,28 @@
 import re


 class YamlParser:
     @staticmethod
     def parse(file):
+        """
+        Parses a YAML file, modifies specific patterns, and checks for unsupported 'context' usage.
+
+        Args:
+            file (file object): The YAML file to parse.
+
+        Returns:
+            str: The modified content of the YAML file.
+
+        Raises:
+            ValueError: If 'context:' is used incorrectly.
+        """
         content = file.read()

         # Replace single { and } with doubled ones, while leaving already doubled ones intact and the other special characters {# and {%
         modified_content = re.sub(r"(?<!\{){(?!\{)(?!\#)(?!\%)", "{{", content)
-        modified_content = re.sub(r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified_content)
+        modified_content = re.sub(
+            r"(?<!\})(?<!\%)(?<!\#)\}(?!})", "}}", modified_content
+        )

         # Check for 'context:' not followed by '[' and raise an error
         if re.search(r"context:(?!\s*\[)", modified_content):
             raise ValueError(
-                "Context is currently only supported in code when creating a task. Please use the 'context' key in the task configuration."
+                "Context is currently only supported in code when creating a task. "
+                "Please use the 'context' key in the task configuration."
            )

         return modified_content
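The brace doubling presumably protects single-braced placeholders from an intermediate format pass so they survive as literals. A quick sketch of the parser's behavior, assuming the YamlParser class shown above is in scope (its import path is not part of this diff); StringIO stands in for an open YAML file:

from io import StringIO

doubled = YamlParser.parse(StringIO("goal: who is {input}?\n"))
print(doubled)  # -> goal: who is {{input}}?

# A 'context:' key not followed by a list raises, matching the hunk above.
try:
    YamlParser.parse(StringIO("context: other_task\n"))
except ValueError as exc:
    print(exc)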
@@ -397,7 +397,7 @@ def test_agent_moved_on_after_max_iterations():
     )

     task = Task(
-        description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool over and over until you're told you can give yout final answer.",
+        description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool over and over until you're told you can give your final answer.",
         expected_output="The final answer",
     )
     output = agent.execute_task(
@@ -948,7 +948,7 @@ def test_agent_use_trained_data(crew_training_handler):
     crew_training_handler().load.return_value = {
         agent.role: {
             "suggestions": [
-                "The result of the math operatio must be right.",
+                "The result of the math operation must be right.",
                 "Result must be better than 1.",
             ]
         }
@@ -958,7 +958,7 @@ def test_agent_use_trained_data(crew_training_handler):

     assert (
         result == "What is 1 + 1?You MUST follow these feedbacks: \n "
-        "The result of the math operatio must be right.\n - Result must be better than 1."
+        "The result of the math operation must be right.\n - Result must be better than 1."
     )
     crew_training_handler.assert_has_calls(
         [mock.call(), mock.call("trained_agents_data.pkl"), mock.call().load()]
@@ -69,7 +69,7 @@ def test_crew_config_conditional_requirement():
                 "agent": "Senior Researcher",
             },
             {
-                "description": "Write a 1 amazing paragraph highlight for each idead that showcases how good an article about this topic could be, check references if necessary or search for more content but make sure it's unique, interesting and well written. Return the list of ideas with their paragraph and your notes.",
+                "description": "Write a 1 amazing paragraph highlight for each idea that showcases how good an article about this topic could be, check references if necessary or search for more content but make sure it's unique, interesting and well written. Return the list of ideas with their paragraph and your notes.",
                 "expected_output": "A 4 paragraph article about AI.",
                 "agent": "Senior Writer",
             },
@@ -572,6 +572,47 @@ def test_api_calls_throttling(capsys):
     moveon.assert_called()


+# This test is not consistent, some issue is happening on the CI when it comes to Prompt tokens
+# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 0, 'successful_requests': 2, 'total_tokens': 34}} CI OUTPUT
+# {'usage_metrics': {'completion_tokens': 34, 'prompt_tokens': 314, 'successful_requests': 2, 'total_tokens': 348}}
+# The issue might be related to the calculate_usage_metrics function
+# @pytest.mark.vcr(filter_headers=["authorization"])
+# def test_crew_full_output():
+#     agent = Agent(
+#         role="test role",
+#         goal="test goal",
+#         backstory="test backstory",
+#         allow_delegation=False,
+#         verbose=True,
+#     )
+
+#     task1 = Task(
+#         description="just say hi!",
+#         expected_output="your greeting",
+#         agent=agent,
+#     )
+#     task2 = Task(
+#         description="just say hello!",
+#         expected_output="your greeting",
+#         agent=agent,
+#     )
+
+#     crew = Crew(agents=[agent], tasks=[task1, task2], full_output=True)
+
+#     result = crew.kickoff()
+
+#     assert result == {
+#         "final_output": "Hello!",
+#         "tasks_outputs": [task1.output, task2.output],
+#         "usage_metrics": {
+#             "total_tokens": 348,
+#             "prompt_tokens": 314,
+#             "completion_tokens": 34,
+#             "successful_requests": 2,
+#         },
+#     }
+
+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_crew_kickoff_usage_metrics():
     inputs = [
@@ -1192,7 +1233,7 @@ def test_task_with_no_arguments():
     )

     task = Task(
-        description="Look at the available data nd give me a sense on the total number of sales.",
+        description="Look at the available data and give me a sense on the total number of sales.",
         expected_output="The total number of sales as an integer",
         agent=researcher,
     )
@@ -1239,7 +1280,7 @@ def test_delegation_is_not_enabled_if_there_are_only_one_agent():
     )

     task = Task(
-        description="Look at the available data nd give me a sense on the total number of sales.",
+        description="Look at the available data and give me a sense on the total number of sales.",
         expected_output="The total number of sales as an integer",
         agent=researcher,
     )
@@ -1602,16 +1643,16 @@ def test_tools_with_custom_caching():

     writer1 = Agent(
         role="Writer",
-        goal="You write lesssons of math for kids.",
-        backstory="You're an expert in writting and you love to teach kids but you know nothing of math.",
+        goal="You write lessons of math for kids.",
+        backstory="You're an expert in writing and you love to teach kids but you know nothing of math.",
         tools=[multiplcation_tool],
         allow_delegation=False,
     )

     writer2 = Agent(
         role="Writer",
-        goal="You write lesssons of math for kids.",
-        backstory="You're an expert in writting and you love to teach kids but you know nothing of math.",
+        goal="You write lessons of math for kids.",
+        backstory="You're an expert in writing and you love to teach kids but you know nothing of math.",
         tools=[multiplcation_tool],
         allow_delegation=False,
     )
@@ -109,7 +109,7 @@ def test_task_callback():
     task_completed.assert_called_once_with(task.output)


-def test_task_callback_returns_task_ouput():
+def test_task_callback_returns_task_output():
     from crewai.tasks.output_format import OutputFormat

     researcher = Agent(