mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-28 09:38:17 +00:00
Implement major fixes from yesterday's group conversation. Now working on tests.
@@ -4,7 +4,7 @@ import threading
 import uuid
 from concurrent.futures import Future
 from copy import copy
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Dict, List, Optional, Tuple, Type, Union
 
 from langchain_openai import ChatOpenAI
 from opentelemetry.trace import Span
@@ -12,10 +12,10 @@ from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
 from pydantic_core import PydanticCustomError
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
+from crewai.tasks.output_format import OutputFormat
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry.telemetry import Telemetry
 from crewai.utilities.converter import Converter, ConverterError
 from crewai.utilities.formatter import aggregate_raw_outputs_from_task_outputs
 from crewai.utilities.i18n import I18N
-from crewai.utilities.printer import Printer
 from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
@@ -159,18 +159,6 @@ class Task(BaseModel):
             )
         return self
 
-    def wait_for_completion(self) -> str | BaseModel:
-        """Wait for asynchronous task completion and return the output."""
-        assert self.async_execution, "Task is not set to be executed asynchronously."
-
-        if self._future:
-            self._future.result()  # Wait for the future to complete
-            self._future = None
-
-        assert self.output, "Task output is not set."
-
-        return self.output.exported_output
-
     def execute_sync(
         self,
         agent: Optional[BaseAgent] = None,
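The hunk above deletes wait_for_completion outright, so callers can no longer block on an async task through this helper. For context, a minimal sketch of the pattern the method wrapped (blocking on a concurrent.futures.Future), assuming the future comes from a thread pool; the executor below is illustrative and not part of this commit:

# Hedged sketch: blocking on a Future, the pattern self._future.result() relied on.
from concurrent.futures import ThreadPoolExecutor

def run_async(fn, *args):
    # submit() returns a Future immediately; the work runs in a worker thread.
    executor = ThreadPoolExecutor(max_workers=1)
    return executor.submit(fn, *args)

future = run_async(lambda: "task result")
print(future.result())  # blocks until the callable finishes, as _future.result() did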
@@ -219,31 +207,27 @@ class Task(BaseModel):
 
         self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
 
         if self.context:
             task_outputs: List[TaskOutput] = []
             for task in self.context:
-                if task.async_execution:
-                    task.wait_for_completion()
                 if task.output:
                     task_outputs.append(task.output)
             context = aggregate_raw_outputs_from_task_outputs(task_outputs)
 
         self.prompt_context = context
-        tools = tools or self.tools
+        tools = tools or self.tools or []
 
         result = agent.execute_task(
             task=self,
             context=context,
             tools=tools,
         )
-        exported_output = self._export_output(result)
-
-        if self.output_file:
-            self._save_output(result)
-
+        pydantic_output, json_output = self._export_output(result)
+
         task_output = TaskOutput(
             description=self.description,
-            raw_output=result,
-            pydantic_output=exported_output["pydantic"],
-            json_output=exported_output["json"],
+            raw=result,
+            pydantic=pydantic_output,
+            json=json_output,
             agent=agent.role,
+            output_format=self._get_output_format(),
         )
         self.output = task_output
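The TaskOutput construction above switches from raw_output/pydantic_output/json_output to raw/pydantic/json and records an output_format. A hedged sketch of how a consumer might branch on those fields, using a stand-in dataclass rather than the real TaskOutput class; the field names come from the hunk, everything else is illustrative:

# Hedged sketch: a stand-in mirroring the new TaskOutput fields.
from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class TaskOutputStandIn:
    description: str
    raw: str
    pydantic: Optional[Any] = None
    json: Optional[dict] = None
    agent: str = ""
    output_format: str = "raw"  # the real field is an OutputFormat enum value

def render(output: TaskOutputStandIn) -> Any:
    # Prefer structured output when the task requested it, else fall back to raw text.
    if output.output_format == "json" and output.json is not None:
        return output.json
    if output.output_format == "pydantic" and output.pydantic is not None:
        return output.pydantic
    return output.raw

print(render(TaskOutputStandIn(description="demo", raw="plain text", agent="Researcher")))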
@@ -289,7 +273,7 @@ class Task(BaseModel):
         """Increment the delegations counter."""
         self.delegations += 1
 
-    def copy(self, agents: Optional[List["BaseAgent"]] = None) -> "Task":
+    def copy(self, agents: List["BaseAgent"]) -> "Task":
         """Create a deep copy of the Task."""
         exclude = {
             "id",
@@ -322,26 +306,24 @@ class Task(BaseModel):
 
     def _export_output(
         self, result: str
-    ) -> Dict[str, Union[BaseModel, Dict[str, Any]]]:
-        output = {
-            "pydantic": None,
-            "json": None,
-        }
+    ) -> Tuple[Optional[BaseModel], Optional[Dict[str, Any]]]:
+        pydantic_output: Optional[BaseModel] = None
+        json_output: Optional[Dict[str, Any]] = None
 
         if self.output_pydantic or self.output_json:
             model_output = self._convert_to_model(result)
-            output["pydantic"] = (
+            pydantic_output = (
                 model_output if isinstance(model_output, BaseModel) else None
             )
-            output["json"] = model_output if isinstance(model_output, dict) else None
+            json_output = model_output if isinstance(model_output, dict) else None
 
-        if self.output_file:
-            self._save_output(output["raw"])
-
-        return output
+        return pydantic_output, json_output
 
     def _convert_to_model(self, result: str) -> Union[dict, BaseModel, str]:
         model = self.output_pydantic or self.output_json
         if model is None:
             return result
 
         try:
             return self._validate_model(result, model)
         except Exception:
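The refactor above replaces the intermediate dict (which was later read with a "raw" key that was never set) with an explicit (pydantic, json) tuple. A standalone sketch of that conversion step, assuming model_output is whatever _convert_to_model returns (a BaseModel, a dict, or the raw string); the function name here is only for illustration:

# Hedged sketch: the tuple-returning conversion in isolation.
from typing import Any, Dict, Optional, Tuple
from pydantic import BaseModel

def export_output(model_output: Any) -> Tuple[Optional[BaseModel], Optional[Dict[str, Any]]]:
    # Exactly one of the two slots is populated, depending on the converted type.
    pydantic_output = model_output if isinstance(model_output, BaseModel) else None
    json_output = model_output if isinstance(model_output, dict) else None
    return pydantic_output, json_output

pydantic_out, json_out = export_output({"answer": 42})
print(pydantic_out, json_out)  # None {'answer': 42}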
@@ -392,6 +374,13 @@ class Task(BaseModel):
 
         return exported_result
 
+    def _get_output_format(self) -> OutputFormat:
+        if self.output_json:
+            return OutputFormat.JSON
+        if self.output_pydantic:
+            return OutputFormat.PYDANTIC
+        return OutputFormat.RAW
+
     def _get_conversion_instructions(self, model: Type[BaseModel], llm: Any) -> str:
         instructions = "I'm gonna convert this raw text into valid JSON."
         if not self._is_gpt(llm):
@@ -400,6 +389,9 @@ class Task(BaseModel):
         return instructions
 
     def _save_output(self, content: str) -> None:
         if not self.output_file:
             raise Exception("Output file path is not set.")
 
+        directory = os.path.dirname(self.output_file)
+        if directory and not os.path.exists(directory):
+            os.makedirs(directory)
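The added lines create the output file's parent directory before anything is written. A small hedged sketch of the full save path, assuming the write itself (cut off at the end of this hunk) is a plain text write; os.makedirs(..., exist_ok=True) is an equivalent, race-free variant of the exists() check:

# Hedged sketch: directory creation plus an assumed plain-text write.
import os

def save_output(output_file: str, content: str) -> None:
    directory = os.path.dirname(output_file)
    if directory:
        os.makedirs(directory, exist_ok=True)  # create missing parent directories
    with open(output_file, "w") as handle:    # the write step is an assumption
        handle.write(content)

save_output("outputs/report.txt", "task result")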