mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 15:48:29 +00:00
The majority of tasks are working now. Need to fix the converter class.
@@ -254,6 +254,27 @@ class Crew(BaseModel):
         return self

+    @model_validator(mode="after")
+    def validate_end_with_at_most_one_async_task(self):
+        """Validates that the crew ends with at most one asynchronous task."""
+        final_async_task_count = 0
+
+        # Traverse tasks backward
+        for task in reversed(self.tasks):
+            if task.async_execution:
+                final_async_task_count += 1
+            else:
+                break  # Stop traversing as soon as a non-async task is encountered
+
+        if final_async_task_count > 1:
+            raise PydanticCustomError(
+                "async_task_count",
+                "The crew must end with at most one asynchronous task.",
+                {},
+            )
+
+        return self
+
     def _setup_from_config(self):
         assert self.config is not None, "Config should not be None."
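For reviewers, a minimal sketch of how this validator surfaces to callers, using the public Agent/Task/Crew API the updated tests below rely on (the role/goal/description strings here are illustrative only):

import pytest
from crewai import Agent, Crew, Task

researcher = Agent(role="Researcher", goal="Research AI", backstory="Experienced researcher.")
first = Task(description="List ideas", expected_output="Bullet list", agent=researcher, async_execution=True)
second = Task(description="List events", expected_output="Bullet list", agent=researcher, async_execution=True)

# Two trailing async tasks now fail at Crew construction time with the
# "async_task_count" error, instead of failing later during kickoff.
with pytest.raises(Exception):
    Crew(agents=[researcher], tasks=[first, second])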
@@ -347,8 +368,7 @@ class Crew(BaseModel):
         if self.process == Process.sequential:
             result = self._run_sequential_process()
         elif self.process == Process.hierarchical:
-            result, manager_metrics = self._run_hierarchical_process()  # type: ignore # Incompatible types in assignment (expression has type "str | dict[str, Any]", variable has type "str")
-            metrics.append(manager_metrics)
+            result = self._run_hierarchical_process()  # type: ignore # Incompatible types in assignment (expression has type "str | dict[str, Any]", variable has type "str")
         else:
             raise NotImplementedError(
                 f"The process '{self.process}' is not implemented yet."
@@ -502,11 +522,6 @@ class Crew(BaseModel):
             task_outputs.append(task_output)
             self._process_task_result(future_task, task_output)

-        final_string_output = aggregate_raw_outputs_from_task_outputs(task_outputs)
-        self._finish_execution(final_string_output)
-
-        token_usage = self.calculate_usage_metrics()
-
         # Important: There should only be one task output in the list
         # If there are more or 0, something went wrong.
         if len(task_outputs) != 1:
@@ -516,10 +531,15 @@ class Crew(BaseModel):
         final_task_output = task_outputs[0]

+        final_string_output = final_task_output.raw
+        self._finish_execution(final_string_output)
+
+        token_usage = self.calculate_usage_metrics()
+
         return CrewOutput(
-            _raw=final_task_output.raw,
-            _pydantic=final_task_output.pydantic,
-            _json=final_task_output.json,
+            raw=final_task_output.raw,
+            pydantic=final_task_output.pydantic,
+            json_dict=final_task_output.json_dict,
             tasks_output=[task.output for task in self.tasks if task.output],
             token_usage=token_usage,
         )
@@ -530,7 +550,8 @@ class Crew(BaseModel):
         if self.output_log_file:
             self._file_handler.log(agent=role, task=output, status="completed")

-    def _run_hierarchical_process(self) -> Tuple[CrewOutput, Dict[str, Any]]:
+    # TODO: @joao, Breaking change. Changed return type. Usage metrics is included in crewoutput
+    def _run_hierarchical_process(self) -> CrewOutput:
         """Creates and assigns a manager agent to make sure the crew completes the tasks."""
         i18n = I18N(prompt_file=self.prompt_file)
         if self.manager_agent is not None:
@@ -564,7 +585,11 @@ class Crew(BaseModel):
             )

             if task.async_execution:
-                context = aggregate_raw_outputs_from_task_outputs(task_outputs)
+                context = (
+                    aggregate_raw_outputs_from_tasks(task.context)
+                    if task.context
+                    else aggregate_raw_outputs_from_task_outputs(task_outputs)
+                )
                 future = task.execute_async(
                     agent=manager, context=context, tools=manager.tools
                 )
@@ -582,7 +607,11 @@ class Crew(BaseModel):
                 # Clear the futures list after processing all async results
                 futures.clear()

-            context = aggregate_raw_outputs_from_task_outputs(task_outputs)
+            context = (
+                aggregate_raw_outputs_from_tasks(task.context)
+                if task.context
+                else aggregate_raw_outputs_from_task_outputs(task_outputs)
+            )
             task_output = task.execute_sync(
                 agent=manager, context=context, tools=manager.tools
             )
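A note on the two aggregate_* helpers used above: their implementations are not part of this diff, but the removed three-async-task test below asserted a "----------" divider between concatenated outputs, so a reasonable sketch of the behavior being assumed is:

from typing import List

def aggregate_raw(outputs: List[str], divider: str = "\n\n----------\n\n") -> str:
    # Assumed behavior only: join raw task outputs with a divider, as the
    # removed test_three_task_with_async_execution assertion implies.
    return divider.join(outputs)

assert aggregate_raw(["a", "b", "c"]).count("\n\n----------\n\n") == 2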
@@ -598,14 +627,26 @@ class Crew(BaseModel):
             task_outputs.append(task_output)
             self._process_task_result(future_task, task_output)

-        final_string_output = aggregate_raw_outputs_from_task_outputs(task_outputs)
+        # Important: There should only be one task output in the list
+        # If there are more or 0, something went wrong.
+        if len(task_outputs) != 1:
+            raise ValueError(
+                "Something went wrong. Kickoff should return only one task output."
+            )
+
+        final_task_output = task_outputs[0]
+
+        final_string_output = final_task_output.raw
         self._finish_execution(final_string_output)

         token_usage = self.calculate_usage_metrics()

-        return (
-            self._format_output(task_outputs, token_usage),
-            token_usage,
+        return CrewOutput(
+            raw=final_task_output.raw,
+            pydantic=final_task_output.pydantic,
+            json_dict=final_task_output.json_dict,
+            tasks_output=[task.output for task in self.tasks if task.output],
+            token_usage=token_usage,
         )

     def copy(self):
@@ -1,6 +1,7 @@
+import json
 from typing import Any, Dict, Optional

-from pydantic import BaseModel, Field, PrivateAttr
+from pydantic import BaseModel, Field

 from crewai.tasks.output_format import OutputFormat
 from crewai.tasks.task_output import TaskOutput
@@ -9,9 +10,13 @@ from crewai.tasks.task_output import TaskOutput
 class CrewOutput(BaseModel):
     """Class that represents the result of a crew."""

-    _raw: str = PrivateAttr(default="")
-    _pydantic: Optional[BaseModel] = PrivateAttr(default=None)
-    _json: Optional[Dict[str, Any]] = PrivateAttr(default=None)
+    raw: str = Field(description="Raw output of crew", default="")
+    pydantic: Optional[BaseModel] = Field(
+        description="Pydantic output of Crew", default=None
+    )
+    json_dict: Optional[Dict[str, Any]] = Field(
+        description="JSON dict output of Crew", default=None
+    )
     tasks_output: list[TaskOutput] = Field(
         description="Output of each task", default=[]
     )
@@ -19,32 +24,30 @@ class CrewOutput(BaseModel):
         description="Processed token summary", default={}
     )

-    @property
-    def raw(self) -> str:
-        return self._raw
-
-    @property
-    def pydantic(self) -> Optional[BaseModel]:
-        # Check if the final task output included a pydantic model
-        if self.tasks_output[-1].output_format != OutputFormat.PYDANTIC:
-            raise ValueError(
-                "No pydantic model found in the final task. Please make sure to set the output_pydantic property in the final task in your crew."
-            )
-
-        return self._pydantic
+    # TODO: Joao - Adding this safety check breakes when people want to see
+    # The full output of a CrewOutput.
+    # @property
+    # def pydantic(self) -> Optional[BaseModel]:
+    #     # Check if the final task output included a pydantic model
+    #     if self.tasks_output[-1].output_format != OutputFormat.PYDANTIC:
+    #         raise ValueError(
+    #             "No pydantic model found in the final task. Please make sure to set the output_pydantic property in the final task in your crew."
+    #         )

+    #     return self._pydantic

     @property
-    def json(self) -> Optional[Dict[str, Any]]:
+    def json(self) -> Optional[str]:
         if self.tasks_output[-1].output_format != OutputFormat.JSON:
             raise ValueError(
                 "No JSON output found in the final task. Please make sure to set the output_json property in the final task in your crew."
             )

-        return self._json
+        return json.dumps(self.json_dict)

-    def to_output_dict(self) -> Dict[str, Any]:
-        if self.json:
-            return self.json
+    def to_dict(self) -> Dict[str, Any]:
+        if self.json_dict:
+            return self.json_dict
         if self.pydantic:
             return self.pydantic.model_dump()
         raise ValueError("No output to convert to dictionary")
@@ -52,6 +55,6 @@ class CrewOutput(BaseModel):
     def __str__(self):
         if self.pydantic:
             return str(self.pydantic)
-        if self.json:
-            return str(self.json)
+        if self.json_dict:
+            return str(self.json_dict)
         return self.raw
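Taken together, the reworked CrewOutput is consumed roughly like this (a sketch based only on the fields and properties in the hunks above; the helper name is illustrative):

from crewai import Crew

def summarize(crew: Crew) -> dict:
    result = crew.kickoff()
    print(result.raw)              # plain string now, replacing raw_output()
    if result.json_dict:           # populated when the final task set output_json
        return result.json_dict
    if result.pydantic:            # populated when the final task set output_pydantic
        return result.pydantic.model_dump()
    return {"raw": result.raw}     # result.to_dict() would raise ValueError here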
@@ -1,3 +1,4 @@
+import json
 import os
 import re
 import threading
@@ -99,6 +100,10 @@ class Task(BaseModel):
         description="Whether the task should have a human review the final answer of the agent",
         default=False,
     )
+    converter_cls: Optional[Type[Converter]] = Field(
+        description="A converter class used to export structured output",
+        default=None,
+    )

     _telemetry: Telemetry
     _execution_span: Span | None = None
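The commit message flags the converter class as unfinished, and nothing in this diff consumes converter_cls yet. A hypothetical sketch of the intended wiring (every name beyond converter_cls itself is an assumption, not part of this commit; Converter's import path comes from the test imports further down):

from crewai import Task
from crewai.utilities.converter import Converter

class ScoreConverter(Converter):
    # Hypothetical subclass; how it would customize conversion is not defined here.
    pass

task = Task(
    description="Score the title from 1 to 5",
    expected_output="A score",
    converter_cls=ScoreConverter,  # assumption: _export_output will use this once fixed
)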
@@ -216,16 +221,13 @@ class Task(BaseModel):
             tools=tools,
         )

-        if self.output_file:
-            self._save_output(result)
-
         pydantic_output, json_output = self._export_output(result)

         task_output = TaskOutput(
             description=self.description,
             raw=result,
             pydantic=pydantic_output,
-            json=json_output,
+            json_dict=json_output,
             agent=agent.role,
             output_format=self._get_output_format(),
         )
@@ -238,6 +240,15 @@ class Task(BaseModel):
         self._telemetry.task_ended(self._execution_span, self)
         self._execution_span = None

+        if self.output_file:
+            content = (
+                json_output
+                if json_output
+                else pydantic_output.model_dump_json() if pydantic_output else result
+            )
+            print("CALLING SAVE FILE", content)
+            self._save_file(content)
+
         return task_output

     def prompt(self) -> str:
@@ -312,10 +323,19 @@ class Task(BaseModel):
         if self.output_pydantic or self.output_json:
             model_output = self._convert_to_model(result)
+            print("MODEL OUTPUT", model_output)
             pydantic_output = (
                 model_output if isinstance(model_output, BaseModel) else None
             )
-            json_output = model_output if isinstance(model_output, dict) else None
+            print("PYDANTIC OUTPUT", pydantic_output)
+            if isinstance(model_output, str):
+                try:
+                    json_output = json.loads(model_output)
+                except json.JSONDecodeError:
+                    json_output = None
+            else:
+                json_output = model_output if isinstance(model_output, dict) else None
+            print("JSON OUTPUT", json_output)

         return pydantic_output, json_output
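The new branch means a converter that hands back a raw string can still be recovered as JSON. A self-contained sketch of just that fallback, mirroring the hunk above outside the Task class:

import json
from typing import Any, Optional

def coerce_json(model_output: Any) -> Optional[dict]:
    # Strings are parsed when possible, dicts pass through, anything else is None.
    if isinstance(model_output, str):
        try:
            return json.loads(model_output)
        except json.JSONDecodeError:
            return None
    return model_output if isinstance(model_output, dict) else None

assert coerce_json('{"score": 4}') == {"score": 4}
assert coerce_json("not json") is None
assert coerce_json({"score": 4}) == {"score": 4}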
@@ -327,13 +347,18 @@ class Task(BaseModel):
         try:
             return self._validate_model(result, model)
         except Exception:
+            print("EXCEPTION IN _convert_to_model")
             return self._handle_partial_json(result, model)

     def _validate_model(
         self, result: str, model: Type[BaseModel]
     ) -> Union[dict, BaseModel]:
+        print("VALIDATE MODEL - RESULT", result)
+        print("VALIDATE MODEL - MODEL", model)
         exported_result = model.model_validate_json(result)
+        print("EXPORTED RESULT", exported_result)
         if self.output_json:
+            print("HERE IN _validate_model", self.output_json)
             return exported_result.model_dump()
         return exported_result
@@ -344,10 +369,12 @@ class Task(BaseModel):
         if match:
             try:
                 exported_result = model.model_validate_json(match.group(0))
+                print("EXPORTED RESULT in handle_partial", exported_result)
                 if self.output_json:
                     return exported_result.model_dump()
                 return exported_result
             except Exception:
+                print("EXCEPTION IN _handle_partial_json")
                 pass

         return self._convert_with_instructions(result, model)
@@ -355,16 +382,21 @@ class Task(BaseModel):
     def _convert_with_instructions(
         self, result: str, model: Type[BaseModel]
     ) -> Union[dict, BaseModel, str]:
+        print("CONVERT WITH INSTRUCTIONS - RESULT", result)
+        print("CONVERT WITH INSTRUCTIONS - model", model)
         llm = self.agent.function_calling_llm or self.agent.llm
         instructions = self._get_conversion_instructions(model, llm)

         converter = Converter(
             llm=llm, text=result, model=model, instructions=instructions
         )
+        print("CONVERTER", converter)
         exported_result = (
             converter.to_pydantic() if self.output_pydantic else converter.to_json()
         )

+        print("EXPORTED RESULT IN CONVERT WITH INSTRUCTIONS", exported_result)
+
         if isinstance(exported_result, ConverterError):
             Printer().print(
                 content=f"{exported_result.message} Using raw output instead.",
@@ -402,6 +434,7 @@ class Task(BaseModel):
         return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

     def _save_file(self, result: Any) -> None:
+        print("SAVING FILE with content", result)
         directory = os.path.dirname(self.output_file)  # type: ignore # Value of type variable "AnyOrLiteralStr" of "dirname" cannot be "str | None"

         if directory and not os.path.exists(directory):
@@ -1,3 +1,4 @@
+import json
 from typing import Any, Dict, Optional

 from pydantic import BaseModel, Field, model_validator
@@ -11,12 +12,14 @@ class TaskOutput(BaseModel):
     description: str = Field(description="Description of the task")
     summary: Optional[str] = Field(description="Summary of the task", default=None)
     raw: str = Field(
-        description="Result of the task"
-    )
+        description="Raw output of the task", default=""
+    )  # TODO: @joao: breaking change, by renaming raw_output to raw, but now consistent with CrewOutput
     pydantic: Optional[BaseModel] = Field(
-        description="Pydantic model output", default=None
+        description="Pydantic output of task", default=None
     )
-    json: Optional[Dict[str, Any]] = Field(description="JSON output", default=None)
+    json_dict: Optional[Dict[str, Any]] = Field(
+        description="JSON dictionary of task", default=None
+    )
     agent: str = Field(description="Agent that executed the task")
     output_format: OutputFormat = Field(
         description="Output format of the task", default=OutputFormat.RAW
@@ -29,11 +32,40 @@ class TaskOutput(BaseModel):
         self.summary = f"{excerpt}..."
         return self

+    # TODO: Joao - Adding this safety check breakes when people want to see
+    # The full output of a TaskOutput or CrewOutput.
+    # @property
+    # def pydantic(self) -> Optional[BaseModel]:
+    #     # Check if the final task output included a pydantic model
+    #     if self.output_format != OutputFormat.PYDANTIC:
+    #         raise ValueError(
+    #             """
+    #             Invalid output format requested.
+    #             If you would like to access the pydantic model,
+    #             please make sure to set the output_pydantic property for the task.
+    #             """
+    #         )

+    #     return self._pydantic

+    @property
+    def json(self) -> Optional[str]:
+        if self.output_format != OutputFormat.JSON:
+            raise ValueError(
+                """
+                Invalid output format requested.
+                If you would like to access the JSON output,
+                please make sure to set the output_json property for the task
+                """
+            )
+
+        return json.dumps(self.json_dict)
+
     def to_dict(self) -> Dict[str, Any]:
         """Convert json_output and pydantic_output to a dictionary."""
         output_dict = {}
-        if self.json:
-            output_dict.update(self.json)
+        if self.json_dict:
+            output_dict.update(self.json_dict)
         if self.pydantic:
             output_dict.update(self.pydantic.model_dump())
         return output_dict
@@ -41,6 +73,6 @@ class TaskOutput(BaseModel):
     def __str__(self) -> str:
         if self.pydantic:
             return str(self.pydantic)
-        if self.json:
-            return str(self.json)
+        if self.json_dict:
+            return str(self.json_dict)
         return self.raw
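A sketch of the renamed TaskOutput surface, constructed directly the way the updated tests below do (raw_output becomes raw, the json field becomes json_dict, and json is now a string-returning property):

import json
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput

out = TaskOutput(
    description="Score the title",
    raw='{"score": 4}',
    json_dict={"score": 4},
    agent="Scorer",
    output_format=OutputFormat.JSON,
)
assert out.json == json.dumps({"score": 4})  # raises ValueError for non-JSON formats
assert out.to_dict() == {"score": 4}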
@@ -4,6 +4,10 @@ from unittest import mock
 from unittest.mock import patch

 import pytest
+from langchain.tools import tool
+from langchain_core.exceptions import OutputParserException
+from langchain_openai import ChatOpenAI

 from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
 from crewai.agents.executor import CrewAgentExecutor
@@ -11,9 +15,6 @@ from crewai.agents.parser import CrewAgentParser
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
 from crewai.utilities import RPMController
-from langchain.tools import tool
-from langchain_core.exceptions import OutputParserException
-from langchain_openai import ChatOpenAI


 def test_agent_creation():
@@ -630,8 +631,9 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
     crew = Crew(agents=[agent1, agent2], tasks=tasks)
     result = crew.kickoff()
-    assert "bye" not in result.raw_output().lower()
-    assert "hi" in result.raw_output().lower() or "hello" in result.raw_output().lower()
+    print("LOWER RESULT", result.raw)
+    assert "bye" not in result.raw.lower()
+    assert "hi" in result.raw.lower() or "hello" in result.raw.lower()


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -643,7 +645,7 @@ def test_agent_step_callback():
     with patch.object(StepCallback, "callback") as callback:

         @tool
-        def learn_about_AI(topic) -> float:
+        def learn_about_AI(topic) -> str:
             """Useful for when you need to learn about AI to write an paragraph about it."""
             return "AI is a very broad field."
@@ -677,7 +679,7 @@ def test_agent_function_calling_llm():
     with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:

         @tool
-        def learn_about_AI(topic) -> float:
+        def learn_about_AI(topic) -> str:
             """Useful for when you need to learn about AI to write an paragraph about it."""
             return "AI is a very broad field."
@@ -749,8 +751,8 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
     crew = Crew(agents=[agent1], tasks=tasks)

     result = crew.kickoff()
-    print("RESULT: ", result.raw_output())
-    assert result.raw_output() == "Howdy!"
+    print("RESULT: ", result.raw)
+    assert result.raw == "Howdy!"


 @pytest.mark.vcr(filter_headers=["authorization"])
New files:
  tests/cassettes/test_crew_kickoff_usage_metrics.yaml (1431 lines; diff suppressed because it is too large)
  tests/cassettes/test_kickoff_for_each_multiple_inputs.yaml (1464 lines; diff suppressed because it is too large)
  tests/cassettes/test_kickoff_for_each_single_input.yaml (192 lines)
@@ -0,0 +1,192 @@ (new file: tests/cassettes/test_kickoff_for_each_single_input.yaml)
+interactions:
+- request:
+    body: '{"messages": [{"content": "You are dog Researcher. You have a lot of experience
+      with dog.\nYour personal goal is: Express hot takes on dog.To give my best complete
+      final answer to the task use the exact following format:\n\nThought: I now can
+      give a great answer\nFinal Answer: my best complete final answer to the task.\nYour
+      final answer must be the great and the most complete as possible, it must be
+      outcome described.\n\nI MUST use these formats, my job depends on it!\nCurrent
+      Task: Give me an analysis around dog.\n\nThis is the expect criteria for your
+      final answer: 1 bullet point about dog that''s under 15 words. \n you MUST return
+      the actual complete content as the final answer, not a summary.\n\nBegin! This
+      is VERY important to you, use the tools available and give your best Final Answer,
+      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
+      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '951'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.35.10
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.35.10
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.3
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: 'data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" Dogs"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" are"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" unparalleled"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" in"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" loyalty"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" and"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" companionship"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" to"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":" humans"},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+        data: {"id":"chatcmpl-9j74gJiY9YxFxbeZ5jmpPclWEeaiP","object":"chat.completion.chunk","created":1720538982,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4008e3b719","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+        data: [DONE]
+
+        '
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8a0959de6b916783-ATL
+      Connection:
+      - keep-alive
+      Content-Type:
+      - text/event-stream; charset=utf-8
+      Date:
+      - Tue, 09 Jul 2024 15:29:42 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=LA.xC.jE_aMjiSgGgU6kDsBPhb_akgqn_4Rx.jXYfnQ-1720538982-1.0.1.1-l5Q1BHprIz5Jxb4HWyYsMfbg6mEnP2H95Vxt89ez24pKOb__90s8LJBBqK52zmPNcPYSSUcaR0wRAaSVFoa4Fw;
+        path=/; expires=Tue, 09-Jul-24 15:59:42 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=zzJ51X.VwRkIq7VLCg9xPQGbXoUmAH6b.2g6sf6Y58Y-1720538982657-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '240'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '22000000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '21999783'
+      x-ratelimit-reset-requests:
+      - 6ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_abdec68aded596628dfd5b999919447d
+    status:
+      code: 200
+      message: OK
+version: 1
@@ -144,10 +144,10 @@ def test_crew_creation():
     expected_string_output = "1. **The Rise of AI in Healthcare**: The convergence of AI and healthcare is a promising frontier, offering unprecedented opportunities for disease diagnosis and patient outcome prediction. AI's potential to revolutionize healthcare lies in its capacity to synthesize vast amounts of data, generating precise and efficient results. This technological breakthrough, however, is not just about improving accuracy and efficiency; it's about saving lives. As we stand on the precipice of this transformative era, we must prepare for the complex challenges and ethical questions it poses, while embracing its ability to reshape healthcare as we know it.\n\n2. **Ethical Implications of AI**: As AI intertwines with our daily lives, it presents a complex web of ethical dilemmas. This fusion of technology, philosophy, and ethics is not merely academically intriguing but profoundly impacts the fabric of our society. The questions raised range from decision-making transparency to accountability, and from privacy to potential biases. As we navigate this ethical labyrinth, it is crucial to establish robust frameworks and regulations to ensure that AI serves humanity, and not the other way around.\n\n3. **AI and Data Privacy**: The rise of AI brings with it an insatiable appetite for data, spawning new debates around privacy rights. Balancing the potential benefits of AI with the right to privacy is a unique challenge that intersects technology, law, and human rights. In an increasingly digital world, where personal information forms the backbone of many services, we must grapple with these issues. It's time to redefine the concept of privacy and devise innovative solutions that ensure our digital footprints are not abused.\n\n4. **AI in Job Market**: The discourse around AI's impact on employment is a narrative of contrast, a tale of displacement and creation. On one hand, AI threatens to automate a multitude of jobs, on the other, it promises to create new roles that we cannot yet imagine. This intersection of technology, economics, and labor rights is a critical dialogue that will shape our future. As we stand at this crossroads, we must not only brace ourselves for the changes but also seize the opportunities that this technological wave brings.\n\n5. **Future of AI Agents**: The evolution of AI agents signifies a leap towards a future where AI is not just a tool, but a partner. These sophisticated AI agents, employed in customer service to personal assistants, are redefining our interactions with technology. As we gaze into the future of AI agents, we see a landscape of possibilities and challenges. This journey will be about harnessing the potential of AI agents while navigating the issues of trust, dependence, and ethical use."

     assert str(result) == expected_string_output
-    assert result.raw_output() == expected_string_output
+    assert result.raw == expected_string_output
     assert isinstance(result, CrewOutput)
     assert len(result.tasks_output) == len(tasks)
-    assert result.result() == [expected_string_output]
+    assert result.raw == expected_string_output


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -174,7 +174,7 @@ def test_sync_task_execution():
     )

     mock_task_output = TaskOutput(
-        description="Mock description", raw_output="mocked output", agent="mocked agent"
+        description="Mock description", raw="mocked output", agent="mocked agent"
     )

     # Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -210,7 +210,7 @@ def test_hierarchical_process():
     result = crew.kickoff()

     assert (
-        result.raw_output()
+        result.raw
         == "1. 'Demystifying AI: An in-depth exploration of Artificial Intelligence for the layperson' - In this piece, we will unravel the enigma of AI, simplifying its complexities into digestible information for the everyday individual. By using relatable examples and analogies, we will journey through the neural networks and machine learning algorithms that define AI, without the jargon and convoluted explanations that often accompany such topics.\n\n2. 'The Role of AI in Startups: A Game Changer?' - Startups today are harnessing the power of AI to revolutionize their businesses. This article will delve into how AI, as an innovative force, is shaping the startup ecosystem, transforming everything from customer service to product development. We'll explore real-life case studies of startups that have leveraged AI to accelerate their growth and disrupt their respective industries.\n\n3. 'AI and Ethics: Navigating the Complex Landscape' - AI brings with it not just technological advancements, but ethical dilemmas as well. This article will engage readers in a thought-provoking discussion on the ethical implications of AI, exploring issues like bias in algorithms, privacy concerns, job displacement, and the moral responsibility of AI developers. We will also discuss potential solutions and frameworks to address these challenges.\n\n4. 'Unveiling the AI Agents: The Future of Customer Service' - AI agents are poised to reshape the customer service landscape, offering businesses the ability to provide round-the-clock support and personalized experiences. In this article, we'll dive deep into the world of AI agents, examining how they work, their benefits and limitations, and how they're set to redefine customer interactions in the digital age.\n\n5. 'From Science Fiction to Reality: AI in Everyday Life' - AI, once a concept limited to the realm of sci-fi, has now permeated our daily lives. This article will highlight the ubiquitous presence of AI, from voice assistants and recommendation algorithms, to autonomous vehicles and smart homes. We'll explore how AI, in its various forms, is transforming our everyday experiences, making the future seem a lot closer than we imagined."
     )
@@ -248,7 +248,7 @@ def test_crew_with_delegating_agents():
     result = crew.kickoff()

     assert (
-        result.raw_output()
+        result.raw
         == "AI Agents, simply put, are intelligent systems that can perceive their environment and take actions to reach specific goals. Imagine them as digital assistants that can learn, adapt and make decisions. They operate in the realms of software or hardware, like a chatbot on a website or a self-driving car. The key to their intelligence is their ability to learn from their experiences, making them better at their tasks over time. In today's interconnected world, AI agents are transforming our lives. They enhance customer service experiences, streamline business processes, and even predict trends in data. Vehicles equipped with AI agents are making transportation safer. In healthcare, AI agents are helping to diagnose diseases, personalizing treatment plans, and monitoring patient health. As we embrace the digital era, these AI agents are not just important, they're becoming indispensable, shaping a future where technology works intuitively and intelligently to meet our needs."
     )
@@ -413,7 +413,7 @@ def test_api_calls_throttling(capsys):


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_kickoff_for_each_full_ouput():
+def test_crew_kickoff_usage_metrics():
     inputs = [
         {"topic": "dog"},
         {"topic": "cat"},
@@ -432,14 +432,11 @@ def test_crew_kickoff_usage_metrics():
         agent=agent,
     )

-    crew = Crew(agents=[agent], tasks=[task], full_output=True)
+    crew = Crew(agents=[agent], tasks=[task])
     results = crew.kickoff_for_each(inputs=inputs)

     assert len(results) == len(inputs)
     for result in results:
-        assert "usage_metrics" in result
-        assert isinstance(result["usage_metrics"], dict)
-
         # Assert that all required keys are in usage_metrics and their values are not None
         for key in [
             "total_tokens",
@@ -447,8 +444,8 @@ def test_crew_kickoff_usage_metrics():
             "completion_tokens",
             "successful_requests",
         ]:
-            assert key in result["usage_metrics"]
-            assert result["usage_metrics"][key] > 0
+            assert key in result.token_usage
+            assert result.token_usage[key] > 0


 def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
@@ -515,48 +512,11 @@ def test_sequential_async_task_execution_completion():
     )

     sequential_result = sequential_crew.kickoff()
-    assert sequential_result.raw_output().startswith(
+    assert sequential_result.raw.startswith(
         "**The Evolution of Artificial Intelligence: A Journey Through Milestones**"
     )


-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_async_task_execution_completion():
-    from langchain_openai import ChatOpenAI
-
-    list_ideas = Task(
-        description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
-        expected_output="Bullet point list of 5 important events.",
-        agent=researcher,
-        async_execution=True,
-    )
-    list_important_history = Task(
-        description="Research the history of AI and give me the 5 most important events that shaped the technology.",
-        expected_output="Bullet point list of 5 important events.",
-        agent=researcher,
-        async_execution=True,
-    )
-    write_article = Task(
-        description="Write an article about the history of AI and its most important events.",
-        expected_output="A 4 paragraph article about AI.",
-        agent=writer,
-        context=[list_ideas, list_important_history],
-    )
-
-    hierarchical_crew = Crew(
-        agents=[researcher, writer],
-        process=Process.hierarchical,
-        tasks=[list_ideas, list_important_history, write_article],
-        manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),
-    )
-
-    hierarchical_result = hierarchical_crew.kickoff()
-
-    assert hierarchical_result.raw_output().startswith(
-        "The history of artificial intelligence (AI) is a fascinating journey"
-    )
-
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_single_task_with_async_execution():
@@ -581,8 +541,7 @@ def test_single_task_with_async_execution():
     )

     result = crew.kickoff()
-    print(result.raw_output())
-    assert result.raw_output().startswith(
+    assert result.raw.startswith(
         "- The impact of AI agents on remote work productivity."
     )
@@ -615,17 +574,21 @@ def test_three_task_with_async_execution():
         async_execution=True,
     )

-    crew = Crew(
-        agents=[researcher_agent],
-        process=Process.sequential,
-        tasks=[bullet_list, numbered_list, letter_list],
-    )
+    # Expected result is that we will get an error
+    # because a crew can end only end with one or less
+    # async tasks
+    with pytest.raises(pydantic_core._pydantic_core.ValidationError) as error:
+        Crew(
+            agents=[researcher_agent],
+            process=Process.sequential,
+            tasks=[bullet_list, numbered_list, letter_list],
+        )

-    # Expected result is that we are going to concatenate the output from each async task.
-    # Because we add a buffer between each task, we should see a "----------" string
-    # after the first and second task in the final output.
-    result = crew.kickoff()
-    assert result.raw_output().count("\n\n----------\n\n") == 2
+    assert error.value.errors()[0]["type"] == "async_task_count"
+    assert (
+        "The crew must end with at most one asynchronous task."
+        in error.value.errors()[0]["msg"]
+    )


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -649,7 +612,7 @@ async def test_crew_async_kickoff():
         agent=agent,
     )

-    crew = Crew(agents=[agent], tasks=[task], full_output=True)
+    crew = Crew(agents=[agent], tasks=[task])
     results = await crew.kickoff_for_each_async(inputs=inputs)

     assert len(results) == len(inputs)
@@ -662,8 +625,7 @@ async def test_crew_async_kickoff():
             "successful_requests",
         ]:
             assert key in result.token_usage
-            # TODO: FIX THIS WHEN USAGE METRICS ARE RE-DONE
-            # assert result.token_usage[key] > 0
+            assert result.token_usage[key] > 0


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -696,7 +658,7 @@ def test_async_task_execution_call_count():

     # Create a valid TaskOutput instance to mock the return value
     mock_task_output = TaskOutput(
-        description="Mock description", raw_output="mocked output", agent="mocked agent"
+        description="Mock description", raw="mocked output", agent="mocked agent"
     )

     # Create a MagicMock Future instance
@@ -723,10 +685,8 @@ def test_async_task_execution_call_count():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_kickoff_for_each_single_input():
     """Tests if kickoff_for_each works with a single input."""
-    from unittest.mock import patch

     inputs = [{"topic": "dog"}]
-    expected_outputs = ["Dogs are loyal companions and popular pets."]

     agent = Agent(
         role="{topic} Researcher",
@@ -740,15 +700,10 @@ def test_kickoff_for_each_single_input():
         agent=agent,
     )

-    with patch.object(Agent, "execute_task") as mock_execute_task:
-        mock_execute_task.side_effect = expected_outputs
-        crew = Crew(agents=[agent], tasks=[task])
-        results = crew.kickoff_for_each(inputs=inputs)
+    crew = Crew(agents=[agent], tasks=[task])
+    results = crew.kickoff_for_each(inputs=inputs)

     assert len(results) == 1
-    for result in results:
-        assert result == expected_outputs[0]
+    print("RESULT:", results)


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -761,11 +716,6 @@ def test_kickoff_for_each_multiple_inputs():
         {"topic": "cat"},
         {"topic": "apple"},
     ]
-    expected_outputs = [
-        "Dogs are loyal companions and popular pets.",
-        "Cats are independent and low-maintenance pets.",
-        "Apples are a rich source of dietary fiber and vitamin C.",
-    ]

     agent = Agent(
         role="{topic} Researcher",
@@ -779,14 +729,10 @@ def test_kickoff_for_each_multiple_inputs():
         agent=agent,
     )

-    with patch.object(Agent, "execute_task") as mock_execute_task:
-        mock_execute_task.side_effect = expected_outputs
-        crew = Crew(agents=[agent], tasks=[task])
-        results = crew.kickoff_for_each(inputs=inputs)
+    crew = Crew(agents=[agent], tasks=[task])
+    results = crew.kickoff_for_each(inputs=inputs)

     assert len(results) == len(inputs)
-    for i, res in enumerate(results):
-        assert res == expected_outputs[i]


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1058,7 +1004,7 @@ def test_crew_function_calling_llm():
     with patch.object(llm.client, "create", wraps=llm.client.create) as private_mock:

         @tool
-        def learn_about_AI(topic) -> float:
+        def learn_about_AI(topic) -> str:
             """Useful for when you need to learn about AI to write an paragraph about it."""
             return "AI is a very broad field."
@@ -1107,7 +1053,7 @@ def test_task_with_no_arguments():
     crew = Crew(agents=[researcher], tasks=[task])

     result = crew.kickoff()
-    assert result.raw_output() == "75"
+    assert result.raw == "75"


 def test_code_execution_flag_adds_code_tool_upon_kickoff():
@@ -1174,39 +1120,12 @@ def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():

     result = crew.kickoff()
     assert (
-        result.raw_output()
+        result.raw
         == "Howdy! I hope this message finds you well and brings a smile to your face. Have a fantastic day!"
     )
     assert len(agent.tools) == 0


-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_agent_usage_metrics_are_captured_for_sequential_process():
-    agent = Agent(
-        role="Researcher",
-        goal="Be super empathetic.",
-        backstory="You're love to sey howdy.",
-        allow_delegation=False,
-    )
-
-    task = Task(description="say howdy", expected_output="Howdy!", agent=agent)
-
-    crew = Crew(agents=[agent], tasks=[task])
-
-    result = crew.kickoff()
-    assert result.raw_output() == "Howdy!"
-
-    required_keys = [
-        "total_tokens",
-        "prompt_tokens",
-        "completion_tokens",
-        "successful_requests",
-    ]
-    for key in required_keys:
-        assert key in crew.usage_metrics, f"Key '{key}' not found in usage_metrics"
-        assert crew.usage_metrics[key] > 0, f"Value for key '{key}' is zero"
-
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_sequential_crew_creation_tasks_without_agents():
     task = Task(
@@ -1251,13 +1170,15 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():
     )

     result = crew.kickoff()
-    assert result.raw_output() == '"Howdy!"'
+    assert result.raw == '"Howdy!"'
+
+    print(crew.usage_metrics)

     assert crew.usage_metrics == {
-        "total_tokens": 1927,
-        "prompt_tokens": 1557,
-        "completion_tokens": 370,
-        "successful_requests": 4,
+        "total_tokens": 311,
+        "prompt_tokens": 224,
+        "completion_tokens": 87,
+        "successful_requests": 1,
     }
@@ -1282,15 +1203,17 @@ def test_hierarchical_crew_creation_tasks_with_agents():
         manager_llm=ChatOpenAI(model="gpt-4o"),
     )
     crew.kickoff()
+
     assert crew.manager_agent is not None
     assert crew.manager_agent.tools is not None
+    print("TOOL DESCRIPTION", crew.manager_agent.tools[0].description)
     assert crew.manager_agent.tools[0].description.startswith(
-        "Delegate a specific task to one of the following coworkers: [Senior Writer]"
+        "Delegate a specific task to one of the following coworkers: [Senior Writer, Researcher]"
     )


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_crew_creation_tasks_without_async_execution():
+def test_hierarchical_crew_creation_tasks_with_async_execution():
     from langchain_openai import ChatOpenAI

     task = Task(
@@ -1485,7 +1408,7 @@ def test_tools_with_custom_caching():
     from crewai_tools import tool

     @tool
-    def multiplcation_tool(first_number: int, second_number: int) -> str:
+    def multiplcation_tool(first_number: int, second_number: int) -> int:
         """Useful for when you need to multiply two numbers together."""
         return first_number * second_number
@@ -1547,7 +1470,7 @@ def test_tools_with_custom_caching():
         input={"first_number": 2, "second_number": 6},
         output=12,
     )
-    assert result.raw_output() == "3"
+    assert result.raw == "3"


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1646,7 +1569,7 @@ def test_manager_agent():
     )

     mock_task_output = TaskOutput(
-        description="Mock description", raw_output="mocked output", agent="mocked agent"
+        description="Mock description", raw="mocked output", agent="mocked agent"
     )

     # Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -4,12 +4,11 @@ import json
 from unittest.mock import MagicMock, patch

 import pytest
+from pydantic import BaseModel
+from pydantic_core import ValidationError

 from crewai import Agent, Crew, Process, Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.converter import Converter
-from pydantic import BaseModel
-from pydantic_core import ValidationError


 def test_task_tool_reflect_agent_tools():
@@ -109,6 +108,8 @@ def test_task_callback():


 def test_task_callback_returns_task_ouput():
+    from crewai.tasks.output_format import OutputFormat
+
     researcher = Agent(
         role="Researcher",
         goal="Make the best research and analysis on content about AI and AI agents",
@@ -132,6 +133,8 @@ def test_task_callback_returns_task_ouput():
     task_completed.assert_called_once()
     callback_data = task_completed.call_args[0][0]

+    print("CALLBACK DATA", callback_data)
+
     # Check if callback_data is TaskOutput object or JSON string
     if isinstance(callback_data, TaskOutput):
         callback_data = json.dumps(callback_data.model_dump())
@@ -140,11 +143,14 @@ def test_task_callback_returns_task_ouput():
     output_dict = json.loads(callback_data)
     expected_output = {
         "description": task.description,
-        "exported_output": "exported_ok",
-        "raw_output": "exported_ok",
+        "raw": "exported_ok",
+        "pydantic": None,
+        "json_dict": None,
         "agent": researcher.role,
         "summary": "Give me a list of 5 interesting ideas to explore...",
+        "output_format": OutputFormat.RAW,
     }
+    print("OUTPUT DICT", output_dict)
     assert output_dict == expected_output
@@ -220,7 +226,7 @@ def test_output_pydantic():
     crew = Crew(agents=[scorer], tasks=[task])
     result = crew.kickoff()
-    assert isinstance(result, ScoreOutput)
+    assert isinstance(result.pydantic, ScoreOutput)


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -244,7 +250,8 @@ def test_output_json():
     crew = Crew(agents=[scorer], tasks=[task])
     result = crew.kickoff()
-    assert '{\n "score": 4\n}' == result
+    print("RESULT JSON", result)
+    assert '{"score": 4}' == result.json


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -280,7 +287,11 @@ def test_output_pydantic_to_another_task():
     crew = Crew(agents=[scorer], tasks=[task1, task2], verbose=2)
     result = crew.kickoff()
-    assert 5 == result.score
+    pydantic_result = result.pydantic
+    assert isinstance(
+        pydantic_result, ScoreOutput
+    ), "Expected pydantic result to be of type ScoreOutput"
+    assert 5 == pydantic_result.score


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -311,7 +322,7 @@ def test_output_json_to_another_task():
     crew = Crew(agents=[scorer], tasks=[task1, task2])
     result = crew.kickoff()
-    assert '{\n "score": 5\n}' == result
+    assert '{"score": 5}' == result.json


 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -363,7 +374,9 @@ def test_save_task_json_output():
     with patch.object(Task, "_save_file") as save_file:
         save_file.return_value = None
         crew.kickoff()
-        save_file.assert_called_once_with('{\n "score": 4\n}')
+        save_file.assert_called_once_with(
+            {"score": 4}
+        )  # TODO: @Joao, should this be a dict or a json string?


 @pytest.mark.vcr(filter_headers=["authorization"])