mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 15:48:29 +00:00
* better spacing * works with llama index * works on langchain custom just need delegation to work * cleanup for custom_agent class * works with different argument expectations for agent_executor * cleanup for hierarchical process, better agent_executor args handler and added to the crew agent doc page * removed code examples for langchain + llama index, added to docs instead * added key output if return is not a str and added some tests * added hinting for CustomAgent class * removed pass as it was not needed * closer, just need to figure out agentTools * running agents - llamaindex and langchain with base agent * some cleanup on baseAgent * minimum for agent to run for base class and ensure it works with hierarchical process * cleanup for original agent to take on BaseAgent class * Agent takes on langchainagent and cleanup across * token handling working for usage_metrics to continue working * installed llama-index, updated docs and added better name * fixed some type errors * base agent holds token_process * hierarchical process uses proper tools and no longer relies on hasattr for token_processes * removal of test_custom_agent_executions * this fixes copying agents * leveraging an executor class for triggering llamaindex agent * llama index now has ask_human * executor mixins added * added output converter base class * type listed * cleanup for output conversions and tokenprocess eliminated redundancy * properly handling tokens * simplified token calc handling * original agent with base agent builder structure setup * better docs * no more llama-index dep * cleaner docs * test fixes * poetry reverts and better docs * base_agent_tools set for third party agents * updated task and test fix
80 lines
2.7 KiB
Python
80 lines
2.7 KiB
Python
import json
|
|
|
|
from langchain.schema import HumanMessage, SystemMessage
|
|
from langchain_openai import ChatOpenAI
|
|
from pydantic import model_validator
|
|
from crewai.agents.agent_builder.utilities.base_output_converter_base import (
|
|
OutputConverter,
|
|
)
|
|
|
|
|
|
class ConverterError(Exception):
    """Error raised when Converter fails to parse the input."""

    def __init__(self, message: str, *args: object) -> None:
        """Keep *message* on the instance and forward everything to Exception."""
        self.message = message
        super().__init__(message, *args)
|
|
|
|
|
|
class Converter(OutputConverter):
    """Converts free text into either a pydantic model instance or a JSON string.

    When the configured LLM is a plain OpenAI ``ChatOpenAI`` (default API
    base), conversion goes through an Instructor wrapper (function calling);
    otherwise it falls back to a ``prompt | llm | parser`` chain. Conversion
    retries up to ``max_attemps`` times (attribute name inherited, typo and
    all, from ``OutputConverter``).
    """

    @model_validator(mode="after")
    def check_llm_provider(self):
        """Resolve the LLM provider check into a plain boolean.

        Fixes the original shadowing bug: assigning only in the negative case
        left ``self._is_gpt`` as the (always-truthy) bound method when the LLM
        *was* GPT. Storing the call's result makes later ``if self._is_gpt:``
        checks reflect the actual provider in both cases.
        """
        self._is_gpt = self._is_gpt(self.llm)  # type: ignore[assignment] # bool now shadows the method on the instance
        # Pydantic v2 after-validators must return the model instance.
        return self

    def to_pydantic(self, current_attempt=1):
        """Convert ``self.text`` into an instance of ``self.model``.

        Retries recursively up to ``self.max_attemps`` times. On final
        failure a ``ConverterError`` is *returned*, not raised — callers
        inspect the result with ``isinstance``.
        """
        try:
            if self._is_gpt:
                return self._create_instructor().to_pydantic()
            else:
                return self._create_chain().invoke({})
        except Exception as e:
            if current_attempt < self.max_attemps:
                return self.to_pydantic(current_attempt + 1)
            return ConverterError(
                f"Failed to convert text into a pydantic model due to the following error: {e}"
            )

    def to_json(self, current_attempt=1):
        """Convert ``self.text`` into a JSON string.

        Same retry/return-error contract as :meth:`to_pydantic`.
        """
        try:
            if self._is_gpt:
                return self._create_instructor().to_json()
            else:
                return json.dumps(self._create_chain().invoke({}).model_dump())
        except Exception:
            if current_attempt < self.max_attemps:
                return self.to_json(current_attempt + 1)
            return ConverterError("Failed to convert text into JSON.")

    def _create_instructor(self):
        """Build an Instructor wrapper for OpenAI function-calling extraction."""
        # Local import keeps the optional dependency off the module import path.
        from crewai.utilities import Instructor

        inst = Instructor(
            llm=self.llm,
            max_attemps=self.max_attemps,
            model=self.model,
            content=self.text,
            instructions=self.instructions,
        )
        return inst

    def _create_chain(self):
        """Build a ``prompt | llm | parser`` runnable for non-OpenAI LLMs."""
        from crewai.utilities.crew_pydantic_output_parser import (
            CrewPydanticOutputParser,
        )

        parser = CrewPydanticOutputParser(pydantic_object=self.model)
        new_prompt = SystemMessage(content=self.instructions) + HumanMessage(
            content=self.text
        )
        return new_prompt | self.llm | parser

    def _is_gpt(self, llm) -> bool:  # type: ignore # instance attribute of the same name replaces this after validation
        """Return True if *llm* is a ChatOpenAI pointed at the default OpenAI API."""
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
|