Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-16 03:28:30 +00:00)

Commit: removing langchain
@@ -94,8 +94,9 @@ class Agent(BaseAgent):
     allow_code_execution: Optional[bool] = Field(
         default=False, description="Enable code execution for the agent."
     )
-    sliding_context_window: Optional[bool] = Field(
-        default=False, description="Enable sliding context window for the agent."
+    respect_context_window: Optional[bool] = Field(
+        default=True,
+        description="Keep messages under the context window size by summarizing content.",
     )
     max_retry_limit: int = Field(
         default=2,
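Review note: respect_context_window replaces the old sliding_context_window toggle and defaults to True. A minimal usage sketch; the role/goal/backstory values are illustrative, not from this commit:

    from crewai import Agent

    researcher = Agent(
        role="Researcher",
        goal="Find and verify facts",
        backstory="A careful analyst",
        respect_context_window=True,  # summarize history instead of overflowing the window
    )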
@@ -171,7 +172,7 @@ class Agent(BaseAgent):
                 "input": task_prompt,
                 "tool_names": self.agent_executor.tools_names,
                 "tools": self.agent_executor.tools_description,
-                "should_ask_for_human_input": task.human_input,
+                "ask_for_human_input": task.human_input,
             }
         )["output"]
     except Exception as e:
@@ -232,7 +233,7 @@ class Agent(BaseAgent):
             tools_description=self._render_text_description_and_args(parsed_tools),
             step_callback=self.step_callback,
             function_calling_llm=self.function_calling_llm,
-            sliding_context_window=self.sliding_context_window,
+            respect_context_window=self.respect_context_window,
             request_within_rpm_limit=self._rpm_controller.check_or_wait
             if self._rpm_controller
             else None,
@@ -257,7 +258,7 @@ class Agent(BaseAgent):
     def get_output_converter(self, llm, text, model, instructions):
         return Converter(llm=llm, text=text, model=model, instructions=instructions)

-    def _parse_tools(self, tools: List[Any]) -> List[Any]:  # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
+    def _parse_tools(self, tools: List[Any]) -> List[Any]:  # type: ignore
         """Parse tools to be used for the task."""
         tools_list = []
         try:
@@ -102,7 +102,8 @@ class BaseAgent(ABC, BaseModel):
         description="Maximum number of requests per minute for the agent execution to be respected.",
     )
     allow_delegation: bool = Field(
-        default=True, description="Allow delegation of tasks to agents"
+        default=False,
+        description="Enable agent to delegate and ask questions among each other.",
     )
     tools: Optional[List[Any]] = Field(
         default_factory=list, description="Tools at agents' disposal"
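Review note: this flips the delegation default from True to False, so delegation becomes opt-in. Crews that relied on the old default would need to enable it explicitly (sketch, illustrative values):

    lead = Agent(
        role="Team Lead",
        goal="Coordinate the crew",
        backstory="A seasoned coordinator",
        allow_delegation=True,  # previously the default; now must be opted into
    )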
@@ -24,9 +24,6 @@ class CrewAgentExecutorMixin:

     def _should_force_answer(self) -> bool:
         """Determine if a forced answer is required based on iteration count."""
-        print("*** self.iterations", self.iterations)
-        print("*** self.max_iter", self.max_iter)
-        print("*** self.have_forced_answer", self.have_forced_answer)
         return (self.iterations >= self.max_iter) and not self.have_forced_answer

     def _create_short_term_memory(self, output) -> None:
@@ -34,7 +34,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         step_callback: Any = None,
         original_tools: List[Any] = [],
         function_calling_llm: Any = None,
-        sliding_context_window: bool = False,
+        respect_context_window: bool = False,
         request_within_rpm_limit: Any = None,
         callbacks: List[Any] = [],
     ):
@@ -54,65 +54,75 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.step_callback = step_callback
         self.tools_description = tools_description
         self.function_calling_llm = function_calling_llm
-        self.sliding_context_window = sliding_context_window
+        self.respect_context_window = respect_context_window
         self.request_within_rpm_limit = request_within_rpm_limit
-        self.should_ask_for_human_input = False
+        self.ask_for_human_input = False
         self.messages = []
         self.iterations = 0
         self.have_forced_answer = False
         self.name_to_tool_map = {tool.name: tool for tool in self.tools}

     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
-        formatted_answer = None
-        formatted_prompt = self._format_prompt(self.prompt, inputs)
-        self.should_ask_for_human_input = inputs.get(
-            "should_ask_for_human_input", False
-        )
-        self.messages = self._messages(formatted_prompt)
+        print("starting invoke")
+        if "system" in self.prompt:
+            system_prompt = self._format_prompt(self.prompt["system"], inputs)
+            user_prompt = self._format_prompt(self.prompt["user"], inputs)

-        formatted_answer = self._invoke_loop(formatted_answer)
+            self.messages.append(self._format_msg(system_prompt, role="system"))
+            self.messages.append(self._format_msg(user_prompt))
+        else:
+            user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
+            self.messages.append(self._format_msg(user_prompt))
+        print("after messages")
+        print(self.messages)
+        self.ask_for_human_input = inputs.get("ask_for_human_input", False)

-        if self.should_ask_for_human_input:
+        formatted_answer = self._invoke_loop()
+        print("111after formatted_answer")
+        print(formatted_answer)

+        if self.ask_for_human_input:
             human_feedback = self._ask_human_input(formatted_answer.output)
             if self.crew and self.crew._train:
                 self._handle_crew_training_output(formatted_answer, human_feedback)

             # Making sure we only ask for it once, so disabling for the next thought loop
-            self.should_ask_for_human_input = False
-            self.messages.append(
-                {"role": "user", "content": f"Feedback: {human_feedback}"}
-            )
+            self.ask_for_human_input = False
+            self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
             formatted_answer = self._invoke_loop(None)

         return {"output": formatted_answer.output}

-    def _invoke_loop(self, formatted_answer):
+    def _invoke_loop(self, formatted_answer=None):
+        print("starting _invoke_loop")
         try:
             while not isinstance(formatted_answer, AgentFinish):
                 # print('2222222')
                 if not self.request_within_rpm_limit or self.request_within_rpm_limit():
                     # print('3333333')
-                    print("******* messages")
-                    print(self.messages)
                     answer = LLM(
                         self.llm, stop=self.stop, callbacks=self.callbacks
                     ).call(self.messages)
+                    print("after answer")
+                    print(answer)

                     self.iterations += 1
-                    print("*** self.iterations", self.iterations)
-                    # if self.iterations > 11:
-                    #     sadasd
                     formatted_answer = self._format_answer(answer)
+                    print("222after formatted_answer")
+                    print(formatted_answer)

                     if isinstance(formatted_answer, AgentAction):
                         # print('4444444')
                         action_result = self._use_tool(formatted_answer)
                         formatted_answer.text += f"\nObservation: {action_result}"
                         # print(formatted_answer)
+                        print("after formatted_answer.text")
+                        print(formatted_answer.text)

                         if self.step_callback:
                             formatted_answer.result = action_result
                             self.step_callback(formatted_answer)
                         if self._should_force_answer():
+                            print("starting _should_force_answer")
                             if self.have_forced_answer:
+                                print("forcing answer")
                                 return {
                                     "output": self._i18n.errors(
                                         "force_final_answer_error"
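Review note: with langchain's AgentExecutor gone, invoke() takes a plain dict and returns a dict. A sketch of the call shape implied by the hunks above, where `executor` stands in for a constructed CrewAgentExecutor and task_prompt comes from the agent hunk earlier:

    result = executor.invoke(
        {
            "input": task_prompt,
            "tool_names": executor.tools_names,
            "tools": executor.tools_description,
            "ask_for_human_input": task.human_input,
        }
    )["output"]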
@@ -123,25 +133,25 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                                 f'\n{self._i18n.errors("force_final_answer")}'
                             )
                             self.have_forced_answer = True
-                self.messages.append(
-                    {"role": "assistant", "content": formatted_answer.text}
-                )

+                self.messages.append(
+                    self._format_msg(formatted_answer.text, role="assistant")
+                )

         except OutputParserException as e:
             # print('5555555')
+            print("********* ERROR1")
             self.messages.append({"role": "assistant", "content": e.error})
             self._invoke_loop(formatted_answer)

         except Exception as e:
             # print('6666666')
-            print("*** e", e)
+            print("********* ERRORw")
+            print(e)
             if LLMContextLengthExceededException(str(e))._is_context_limit_error(
                 str(e)
             ):
                 self._handle_context_length()
                 self._invoke_loop(formatted_answer)

         # print('7777777')
         return formatted_answer

     def _use_tool(self, agent_action: AgentAction) -> None:
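Review note: in both except branches the recursive self._invoke_loop(formatted_answer) call discards its result, so the enclosing frame falls through to return its own, possibly stale, formatted_answer. If retry-and-return is the intent, the call would presumably need to capture or propagate the value:

    formatted_answer = self._invoke_loop(formatted_answer)
    # or simply:
    return self._invoke_loop(formatted_answer)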
@@ -176,25 +186,23 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

     def _summarize_messages(self) -> None:
         llm = LLM(self.llm)
-        grouped_messages = []
+        messages_groups = []

         for message in self.messages:
             content = message["content"]
             for i in range(0, len(content), 5000):
-                grouped_messages.append(content[i : i + 5000])
+                messages_groups.append(content[i : i + 5000])

         summarized_contents = []
-        for group in grouped_messages:
+        for group in messages_groups:
             summary = llm.call(
                 [
-                    {
-                        "role": "system",
-                        "content": "You are a helpful assistant that summarizes text.",
-                    },
-                    {
-                        "role": "user",
-                        "content": f"Summarize the following text, make sure to include all the important information: {group}",
-                    },
+                    self._format_msg(
+                        self._i18n.slices("summarizer_system_message"), role="system"
+                    ),
+                    self._format_msg(
+                        self._i18n.errors("sumamrize_instruction").format(group=group),
+                    ),
                 ]
             )
             summarized_contents.append(summary)
@@ -202,14 +210,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         merged_summary = " ".join(summarized_contents)

         self.messages = [
-            {
-                "role": "user",
-                "content": f"This is a summary of our conversation so far:\n{merged_summary}",
-            }
+            self._format_msg(
+                self._i18n.errors("summary").format(merged_summary=merged_summary)
+            )
         ]

     def _handle_context_length(self) -> None:
-        if self.sliding_context_window:
+        if self.respect_context_window:
             self._logger.log(
                 "debug",
                 "Context length exceeded. Summarizing content to fit the model context window.",
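Review note: _summarize_messages chunks every message into fixed 5,000-character segments, summarizes each chunk, then replaces the whole history with one merged summary message. The slicing step in isolation; the function and constant names here are hypothetical, not from the commit:

    CHUNK_SIZE = 5000  # the hard-coded 5000 above

    def split_into_groups(messages: list[dict]) -> list[str]:
        groups = []
        for message in messages:
            content = message["content"]
            # each message's content is cut into CHUNK_SIZE-character segments
            for i in range(0, len(content), CHUNK_SIZE):
                groups.append(content[i : i + CHUNK_SIZE])
        return groups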
@@ -234,7 +241,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

         if (
             CrewTrainingHandler(TRAINING_DATA_FILE).load()
-            and not self.should_ask_for_human_input
+            and not self.ask_for_human_input
         ):
             training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
             if training_data.get(agent_id):
@@ -243,7 +250,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             ] = result.output
             CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data)

-        if self.should_ask_for_human_input and human_feedback is not None:
+        if self.ask_for_human_input and human_feedback is not None:
             training_data = {
                 "initial_output": result.output,
                 "human_feedback": human_feedback,
@@ -261,7 +268,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         return prompt

     def _format_answer(self, answer: str) -> str:
-        return CrewAgentParser(agent=self).parse(answer)
+        return CrewAgentParser(agent=self.agent).parse(answer)

-    def _messages(self, prompt: str) -> List[Dict[str, str]]:
-        return [{"role": "user", "content": prompt}]
+    def _format_msg(self, prompt: str, role: str = "user") -> List[Dict[str, str]]:
+        return {"role": role, "content": prompt}
@@ -10,8 +10,6 @@ from crewai.project import CrewBase, agent, crew, task
 @CrewBase
 class {{crew_name}}Crew():
     """{{crew_name}} crew"""
     agents_config = 'config/agents.yaml'
     tasks_config = 'config/tasks.yaml'

     @agent
     def researcher(self) -> Agent:
@@ -6,7 +6,6 @@ from concurrent.futures import Future
 from hashlib import md5
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

-from langchain_core.callbacks import BaseCallbackHandler
 from pydantic import (
     UUID4,
     BaseModel,
@@ -68,7 +67,6 @@ class Crew(BaseModel):
         manager_llm: The language model that will run manager agent.
         manager_agent: Custom agent that will be used as manager.
         memory: Whether the crew should use memory to store memories of it's execution.
-        manager_callbacks: The callback handlers to be executed by the manager agent when hierarchical process is used
         cache: Whether the crew should use a cache to store the results of the tools execution.
         function_calling_llm: The language model that will run the tool calling for all the agents.
         process: The process flow that the crew will follow (e.g., sequential, hierarchical).
@@ -126,10 +124,6 @@ class Crew(BaseModel):
     manager_agent: Optional[BaseAgent] = Field(
         description="Custom agent that will be used as manager.", default=None
     )
-    manager_callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
-        default=None,
-        description="A list of callback handlers to be executed by the manager agent when hierarchical process is used",
-    )
     function_calling_llm: Optional[Any] = Field(
         description="Language model that will run the agent.", default=None
     )
@@ -1,95 +0,0 @@
-from typing import Callable, List, Dict, Any
-from functools import wraps
-
-
-class Flow:
-    def __init__(self):
-        self._start_method = None
-        self._listeners: Dict[str, List[str]] = {}
-        self._methods: Dict[str, Callable] = {}
-
-    def run(self):
-        if not self._start_method:
-            raise ValueError("No start method defined")
-
-        result = self._methods[self._start_method](self)
-        self._execute_listeners(self._start_method, result)
-
-    def _execute_listeners(self, trigger_method: str, result: Any):
-        if trigger_method in self._listeners:
-            for listener in self._listeners[trigger_method]:
-                listener_result = self._methods[listener](self, result)
-                self._execute_listeners(listener, listener_result)
-
-
-def start():
-    def decorator(func):
-        @wraps(func)
-        def wrapper(self, *args, **kwargs):
-            if not self._start_method:
-                self._start_method = func.__name__
-            return func(self, *args, **kwargs)
-
-        return wrapper
-
-    return decorator
-
-
-def listen(*trigger_methods):
-    def decorator(func):
-        @wraps(func)
-        def wrapper(self, *args, **kwargs):
-            for trigger in trigger_methods:
-                trigger_name = trigger.__name__ if callable(trigger) else trigger
-                if trigger_name not in self._listeners:
-                    self._listeners[trigger_name] = []
-                self._listeners[trigger_name].append(func.__name__)
-            return func(self, *args, **kwargs)
-
-        return wrapper
-
-    return decorator
-
-
-class FlowMeta(type):
-    def __new__(mcs, name, bases, attrs):
-        new_cls = super().__new__(mcs, name, bases, attrs)
-        for name, method in attrs.items():
-            if hasattr(method, "_is_start"):
-                new_cls._start_method = name
-            if hasattr(method, "_listeners"):
-                for trigger in method._listeners:
-                    if trigger not in new_cls._listeners:
-                        new_cls._listeners[trigger] = []
-                    new_cls._listeners[trigger].append(name)
-            new_cls._methods[name] = method
-        return new_cls
-
-
-class BaseFlow(Flow, metaclass=FlowMeta):
-    _start_method = None
-    _listeners = {}
-    _methods = {}
-
-
-# Example usage:
-class ExampleFlow(BaseFlow):
-    @start()
-    def start_method(self):
-        print("Starting the flow")
-        return "Start result"
-
-    @listen(start_method)
-    def second_method(self, result):
-        print(f"Second method, received: {result}")
-        return "Second result"
-
-    @listen(second_method)
-    def third_method(self, result):
-        print(f"Third method, received: {result}")
-        return "Third result"
-
-
-# Run the flow
-flow = ExampleFlow()
-flow.run()
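Review note: as written, this deleted prototype would not actually run. start() and listen() only record state inside wrapper at call time, while FlowMeta looks for _is_start and _listeners attributes on the functions at class-creation time, so run() raises ValueError("No start method defined") before any method is called. For the metaclass wiring to work, the decorators would need to tag the functions; a sketch, not code from the commit:

    def start():
        def decorator(func):
            func._is_start = True  # let FlowMeta discover the start method
            return func
        return decorator

    def listen(*trigger_methods):
        def decorator(func):
            # record trigger names for FlowMeta to register at class creation
            func._listeners = [
                t.__name__ if callable(t) else t for t in trigger_methods
            ]
            return func
        return decorator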
@@ -243,13 +243,18 @@ class Task(BaseModel):
         tools = tools or self.tools or []

         self.processed_by_agents.add(agent.role)
+        print("====================================================")
+        print("context", self.prompt_context)
+        print("context", agent.role)
+        print("context", context)
-
         result = agent.execute_task(
             task=self,
             context=context,
             tools=tools,
         )

+        print("result", result)
+        print("====================================================")
         pydantic_output, json_output = self._export_output(result)

         task_output = TaskOutput(
@@ -276,7 +281,9 @@ class Task(BaseModel):
         content = (
             json_output
             if json_output
-            else pydantic_output.model_dump_json() if pydantic_output else result
+            else pydantic_output.model_dump_json()
+            if pydantic_output
+            else result
         )
         self._save_file(content)

@@ -1,5 +1,4 @@
-from langchain.tools import StructuredTool

 from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools


@@ -1,39 +0,0 @@
-import json
-from typing import Any, List
-
-import regex
-from langchain.output_parsers import PydanticOutputParser
-from langchain_core.exceptions import OutputParserException
-from langchain_core.outputs import Generation
-from pydantic import ValidationError
-
-
-class ToolOutputParser(PydanticOutputParser):
-    """Parses the function calling of a tool usage and it's arguments."""
-
-    def parse_result(self, result: List[Generation], *, partial: bool = False) -> Any:
-        result[0].text = self._transform_in_valid_json(result[0].text)
-        json_object = super().parse_result(result)
-        try:
-            return self.pydantic_object.parse_obj(json_object)
-        except ValidationError as e:
-            name = self.pydantic_object.__name__
-            msg = f"Failed to parse {name} from completion {json_object}. Got: {e}"
-            raise OutputParserException(msg, llm_output=json_object)
-
-    def _transform_in_valid_json(self, text) -> str:
-        text = text.replace("```", "").replace("json", "")
-        json_pattern = r"\{(?:[^{}]|(?R))*\}"
-        matches = regex.finditer(json_pattern, text)
-
-        for match in matches:
-            try:
-                # Attempt to parse the matched string as JSON
-                json_obj = json.loads(match.group())
-                # Return the first successfully parsed JSON object
-                json_obj = json.dumps(json_obj)
-                return str(json_obj)
-            except json.JSONDecodeError:
-                # If parsing fails, skip to the next match
-                continue
-        return text
@@ -15,9 +15,12 @@
     "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
     "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
-    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
+    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output}\nyou MUST return the actual complete content as the final answer, not a summary.",
     "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
-    "getting_input": "This is the agent's final answer: {final_answer}\nPlease provide feedback: "
+    "getting_input": "This is the agent's final answer: {final_answer}\nPlease provide feedback: ",
+    "summarizer_system_message": "You are a helpful assistant that summarizes text.",
+    "sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
+    "summary": "This is a summary of our conversation so far:\n{merged_summary}"
   },
   "errors": {
     "force_final_answer_error": "I can't keep going, this was the best I could do.\n {formatted_answer.text}",
@@ -213,9 +213,8 @@ def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:


 def is_gpt(llm: Any) -> bool:
-    from langchain_openai import ChatOpenAI
-
-    return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None
+    """Return if llm provided is of gpt from openai."""
+    return "gpt" in str(llm).lower()


 def create_converter(
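Review note: the rewritten heuristic no longer needs a ChatOpenAI instance; any value whose string form contains "gpt" passes, including the plain model-name strings now used for llm:

    is_gpt("gpt-4o-mini")        # True
    is_gpt("claude-3-5-sonnet")  # False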
@@ -4,7 +4,6 @@ from crewai.agent import Agent
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
-from langchain_openai import ChatOpenAI
 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
 from rich.console import Console
@@ -51,7 +50,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=ChatOpenAI(model=self.openai_model_name),
+            llm=self.openai_model_name,
         )

     def _evaluation_task(
@@ -1,6 +1,4 @@
 from typing import Any, List, Optional

-from langchain_openai import ChatOpenAI
 from pydantic import BaseModel, Field
-
 from crewai.agent import Agent
@@ -27,7 +25,7 @@ class CrewPlanner:
         self.tasks = tasks

         if planning_agent_llm is None:
-            self.planning_agent_llm = ChatOpenAI(model="gpt-4o-mini")
+            self.planning_agent_llm = "gpt-4o-mini"
         else:
             self.planning_agent_llm = planning_agent_llm

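Review note: same pattern as the CrewEvaluator change above; the model is handed around as a plain string and resolved internally by the new LLM wrapper instead of being instantiated as ChatOpenAI up front. Sketch with illustrative values:

    planning_agent = Agent(
        role="Task Planner",
        goal="Plan the tasks",
        backstory="An expert planner",
        llm="gpt-4o-mini",  # plain model name instead of ChatOpenAI(model=...)
    )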
@@ -20,18 +20,24 @@ class Prompts(BaseModel):
             slices.append("tools")
         else:
             slices.append("no_tools")

+        system = self._build_prompt(slices)
         slices.append("task")

         if not self.system_template and not self.prompt_template:
-            return self._build_prompt(slices)
+            return {
+                "system": system,
+                "user": self._build_prompt(["task"]),
+                "prompt": self._build_prompt(slices),
+            }
         else:
-            return self._build_prompt(
-                slices,
-                self.system_template,
-                self.prompt_template,
-                self.response_template,
-            )
+            return {
+                "prompt": self._build_prompt(
+                    slices,
+                    self.system_template,
+                    self.prompt_template,
+                    self.response_template,
+                )
+            }

     def _build_prompt(
         self,
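Review note: task_execution() now returns a dict instead of a single prompt string, which is what the executor's `if "system" in self.prompt` branch keys on. Shape sketch, values truncated:

    # without custom templates:
    {"system": "...", "user": "...", "prompt": "..."}
    # with custom templates:
    {"prompt": "..."}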
@@ -11,10 +11,7 @@ class TokenCalcHandler(CustomLogger):
             return

         self.token_cost_process.sum_successful_requests(1)
-        print("*** response_obj", response_obj)
-        self.token_cost_process.sum_prompt_tokens(
-            response_obj["usage"]["prompt_tokens"]
-        )
+        self.token_cost_process.sum_prompt_tokens(response_obj["usage"].prompt_tokens)
         self.token_cost_process.sum_completion_tokens(
-            response_obj["usage"]["completion_tokens"]
+            response_obj["usage"].completion_tokens
        )
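Review note: the switch from response_obj["usage"]["prompt_tokens"] to attribute access suggests the usage payload now arrives as an object with token-count attributes (consistent with litellm-style callbacks) rather than a plain dict. A defensive sketch tolerating both shapes; the helper name is hypothetical:

    def _prompt_tokens(usage) -> int:
        if isinstance(usage, dict):
            return usage["prompt_tokens"]
        return usage.prompt_tokens  # object-style usage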