mirror of https://github.com/crewAIInc/crewAI.git
commit: WIP
@@ -1,7 +1,7 @@
 import re
 import shutil
 import subprocess
-from typing import Any, Dict, List, Literal, Optional, Sequence, Union
+from typing import Any, Dict, List, Literal, Optional, Sequence, Union, cast

 from pydantic import Field, InstanceOf, PrivateAttr, model_validator

@@ -184,19 +184,12 @@ class Agent(BaseAgent):
         # append specific instructions to the task prompt to ensure
         # that the final answer does not include any code block markers
         if task.output_json or task.output_pydantic:
-            # Generate the schema based on the output format
-            if task.output_json:
-                # schema = json.dumps(task.output_json, indent=2)
-                schema = generate_model_description(task.output_json)
-                task_prompt += "\n" + self.i18n.slice(
-                    "formatted_task_instructions"
-                ).format(output_format=schema)
-
-            elif task.output_pydantic:
-                schema = generate_model_description(task.output_pydantic)
-                task_prompt += "\n" + self.i18n.slice(
-                    "formatted_task_instructions"
-                ).format(output_format=schema)
+            # Choose the output format, preferring output_json if available
+            output_format = (
+                task.output_json if task.output_json else task.output_pydantic
+            )
+            schema = generate_model_description(cast(type, output_format))
+            task_prompt += f"\n{self.i18n.slice('formatted_task_instructions').format(output_format=schema)}"

         if context:
             task_prompt = self.i18n.slice("task_with_context").format(
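For context on the change above: task.output_json and task.output_pydantic are both optional model classes on Task, so even though the enclosing if guarantees one of them is set, the ternary expression still types as Optional and needs the cast(type, ...) before it reaches generate_model_description. A minimal stand-alone sketch of what the new branch does (the Report model and the variable values are illustrative, not part of the commit):

    from typing import Optional, Type, cast

    from pydantic import BaseModel

    from crewai.utilities.converter import generate_model_description


    class Report(BaseModel):
        title: str
        bullet_points: list[str]


    # Mirrors task.output_json / task.output_pydantic: both are optional model classes.
    output_json: Optional[Type[BaseModel]] = Report
    output_pydantic: Optional[Type[BaseModel]] = None

    output_format = output_json if output_json else output_pydantic
    # Without the cast, output_format is still Optional to the type checker.
    schema = generate_model_description(cast(type, output_format))
    print(schema)  # a textual description of Report's fields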
@@ -163,6 +163,7 @@ class BaseAgent(ABC, BaseModel):
         tool meets these criteria, it is processed and added to the list of
         tools. Otherwise, a ValueError is raised.
         """
+        print(f"Validating tools: {tools}")
         processed_tools = []
         for tool in tools:
             if isinstance(tool, BaseTool):
@@ -180,6 +181,7 @@ class BaseAgent(ABC, BaseModel):
                     "Tool must be an instance of BaseTool or "
                     "an object with 'name', 'func', and 'description' attributes."
                 )
+        print(f"Processed tools: {processed_tools}")
         return processed_tools

     @model_validator(mode="after")
@@ -338,7 +340,15 @@ class BaseAgent(ABC, BaseModel):
         if self.cache:
             self.cache_handler = cache_handler
             self.tools_handler.cache = cache_handler
-        self.create_agent_executor()
+        print(f"Setting cache handler for agent: {self.id}")
+        # Only create the executor if it hasn't been created yet.
+        if self.agent_executor is None:
+            self.create_agent_executor()
+        else:
+            print(
+                "Agent executor already exists, skipping creation in set_cache_handler."
+            )
+        print(f"Cache handler set for agent: {self.id}")

     def increment_formatting_errors(self) -> None:
         self.formatting_errors += 1
@@ -351,4 +361,12 @@ class BaseAgent(ABC, BaseModel):
         """
         if not self._rpm_controller:
             self._rpm_controller = rpm_controller
-            self.create_agent_executor()
+            print(f"Setting RPM controller for agent: {self.id}")
+            # Only create the executor if it hasn't been created yet.
+            if self.agent_executor is None:
+                self.create_agent_executor()
+            else:
+                print(
+                    "Agent executor already exists, skipping creation in set_rpm_controller."
+                )
+            print(f"RPM controller set for agent: {self.id}")
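Both setters above now share the same guard: the executor is only built if it does not already exist, so repeated calls no longer recreate it. A small stand-alone sketch of that pattern (the class and method names here are illustrative only, not part of the diff):

    class ExecutorHolder:
        """Toy stand-in for BaseAgent's executor bookkeeping."""

        def __init__(self) -> None:
            self.agent_executor = None

        def ensure_executor(self) -> None:
            # Only create the executor if it hasn't been created yet.
            if self.agent_executor is None:
                self.agent_executor = object()  # stands in for create_agent_executor()
            else:
                print("Agent executor already exists, skipping creation.")


    holder = ExecutorHolder()
    holder.ensure_executor()  # first call creates the executor
    holder.ensure_executor()  # second call prints the skip message and leaves it untouched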
@@ -82,7 +82,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             tool.name: tool for tool in self.tools
         }
         self.stop = stop_words
-        self.llm.stop = list(set(self.llm.stop + self.stop))
+        self.llm.stop = list(set((self.llm.stop or []) + self.stop))

     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         if "system" in self.prompt:
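The one-line change above guards against an unset stop list: when the underlying LLM was configured without stop words, self.llm.stop can be None, and concatenating None with a list raises a TypeError. A minimal stand-alone sketch (variable names are illustrative):

    stop_words = ["\nObservation:"]
    llm_stop = None  # an LLM configured with no stop words

    # Old behaviour: list(set(llm_stop + stop_words)) raises
    # TypeError: unsupported operand type(s) for +: 'NoneType' and 'list'

    # New behaviour: fall back to an empty list before concatenating.
    merged = list(set((llm_stop or []) + stop_words))
    print(merged)  # ['\nObservation:']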
src/crewai/agents/langchain_agent_adapter.py (new file, 254 lines)
@@ -0,0 +1,254 @@
+from typing import Any, List, Optional, Type, cast
+
+from pydantic import Field
+
+from crewai.agents.agent_builder.base_agent import BaseAgent
+from crewai.task import Task
+from crewai.tools import BaseTool
+from crewai.utilities.converter import Converter, generate_model_description
+
+
+class LangChainAgentAdapter(BaseAgent):
+    """
+    Adapter class to wrap a LangChain agent and make it compatible with CrewAI's BaseAgent interface.
+
+    Note:
+        - This adapter does not require LangChain as a dependency.
+        - It wraps an external LangChain agent (passed as any type) and delegates calls
+          such as execute_task() to the LangChain agent's invoke() method.
+        - Extended logic is added to build prompts, incorporate memory, knowledge, training hints,
+          and now a human feedback loop similar to what is done in CrewAgentExecutor.
+    """
+
+    langchain_agent: Any = Field(
+        ...,
+        description="The wrapped LangChain runnable agent instance. It is expected to have an 'invoke' method.",
+    )
+    function_calling_llm: Optional[Any] = Field(
+        default=None, description="Optional function calling LLM."
+    )
+    step_callback: Optional[Any] = Field(
+        default=None,
+        description="Callback executed after each step of agent execution.",
+    )
+    allow_code_execution: Optional[bool] = Field(
+        default=False, description="Enable code execution for the agent."
+    )
+    multimodal: bool = Field(
+        default=False, description="Whether the agent is multimodal."
+    )
+    i18n: Any = None
+    crew: Any = None
+    knowledge: Any = None
+    tools: Optional[List[BaseTool]] = None
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    def execute_task(
+        self,
+        task: Task,
+        context: Optional[str] = None,
+        tools: Optional[List[BaseTool]] = None,
+    ) -> str:
+        """
+        Execute a task by building the full task prompt (with memory, knowledge, tool instructions,
+        and training hints) then delegating execution to the wrapped LangChain agent.
+        If the task requires human input, a feedback loop is run that mimics the CrewAgentExecutor.
+        """
+        task_prompt = task.prompt()
+
+        if task.output_json or task.output_pydantic:
+            # Choose the output format, preferring output_json if available
+            output_format = (
+                task.output_json if task.output_json else task.output_pydantic
+            )
+            schema = generate_model_description(cast(type, output_format))
+            instruction = self.i18n.slice("formatted_task_instructions").format(
+                output_format=schema
+            )
+            task_prompt += f"\n{instruction}"
+
+        if context:
+            task_prompt = self.i18n.slice("task_with_context").format(
+                task=task_prompt, context=context
+            )
+
+        if self.crew and self.crew.memory:
+            from crewai.memory.contextual.contextual_memory import ContextualMemory
+
+            contextual_memory = ContextualMemory(
+                self.crew.memory_config,
+                self.crew._short_term_memory,
+                self.crew._long_term_memory,
+                self.crew._entity_memory,
+                self.crew._user_memory,
+            )
+            memory = contextual_memory.build_context_for_task(task, context)
+            if memory.strip():
+                task_prompt += self.i18n.slice("memory").format(memory=memory)
+
+        if self.knowledge:
+            agent_knowledge_snippets = self.knowledge.query([task.prompt()])
+            if agent_knowledge_snippets:
+                from crewai.knowledge.utils.knowledge_utils import (
+                    extract_knowledge_context,
+                )
+
+                agent_knowledge_context = extract_knowledge_context(
+                    agent_knowledge_snippets
+                )
+                if agent_knowledge_context:
+                    task_prompt += agent_knowledge_context
+
+        if self.crew:
+            knowledge_snippets = self.crew.query_knowledge([task.prompt()])
+            if knowledge_snippets:
+                from crewai.knowledge.utils.knowledge_utils import (
+                    extract_knowledge_context,
+                )
+
+                crew_knowledge_context = extract_knowledge_context(knowledge_snippets)
+                if crew_knowledge_context:
+                    task_prompt += crew_knowledge_context
+
+        tools = tools or self.tools or []
+        self.create_agent_executor(tools=tools)
+
+        if self.crew and getattr(self.crew, "_train", False):
+            task_prompt = self._training_handler(task_prompt=task_prompt)
+        else:
+            task_prompt = self._use_trained_data(task_prompt=task_prompt)
+
+        try:
+            # Initial invocation of the LangChain agent
+            result = self.agent_executor.invoke(
+                {
+                    "input": task_prompt,
+                    "tool_names": getattr(self.agent_executor, "tools_names", ""),
+                    "tools": getattr(self.agent_executor, "tools_description", ""),
+                    "ask_for_human_input": task.human_input,
+                }
+            )["output"]
+
+            # If human feedback is required, enter a feedback loop
+            if task.human_input:
+                result = self._handle_human_feedback(result)
+        except Exception as e:
+            # Example: you could add retry logic here if desired.
+            raise e
+
+        return result
+
+    def _handle_human_feedback(self, current_output: str) -> str:
+        """
+        Implements a feedback loop that prompts the user for feedback and then instructs
+        the underlying LangChain agent to regenerate its answer with the requested changes.
+        """
+        while True:
+            print("\nAgent output:")
+            print(current_output)
+            # Prompt the user for feedback
+            feedback = input("\nEnter your feedback (or press Enter to accept): ")
+            if not feedback.strip():
+                break  # No feedback provided, exit the loop
+
+            # Construct a new prompt with explicit instructions
+            new_prompt = (
+                f"Below is your previous answer:\n{current_output}\n\n"
+                f"Based on the following feedback: '{feedback}', please regenerate your answer with the requested details. "
+                f"Specifically, display 10 bullet points in each section. Provide the complete updated answer below.\n\nUpdated answer:"
+            )
+            try:
+                invocation = self.agent_executor.invoke(
+                    {
+                        "input": new_prompt,
+                        "tool_names": getattr(self.agent_executor, "tools_names", ""),
+                        "tools": getattr(self.agent_executor, "tools_description", ""),
+                        "ask_for_human_input": True,
+                    }
+                )
+                current_output = invocation["output"]
+            except Exception as e:
+                print("Error during re-invocation with feedback:", e)
+                break
+
+        return current_output
+
+    def _generate_model_description(self, model: Any) -> str:
+        """
+        Generates a string description (schema) for the expected output.
+        This is a placeholder that should call the actual implementation.
+        """
+        from crewai.utilities.converter import generate_model_description
+
+        return generate_model_description(model)
+
+    def _training_handler(self, task_prompt: str) -> str:
+        """
+        Append training instructions from Crew data to the task prompt.
+        """
+        from crewai.utilities.constants import TRAINING_DATA_FILE
+        from crewai.utilities.training_handler import CrewTrainingHandler
+
+        data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
+        if data:
+            agent_id = str(self.id)
+            if data.get(agent_id):
+                human_feedbacks = [
+                    i["human_feedback"] for i in data.get(agent_id, {}).values()
+                ]
+                task_prompt += (
+                    "\n\nYou MUST follow these instructions: \n "
+                    + "\n - ".join(human_feedbacks)
+                )
+        return task_prompt
+
+    def _use_trained_data(self, task_prompt: str) -> str:
+        """
+        Append pre-trained instructions from Crew data to the task prompt.
+        """
+        from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE
+        from crewai.utilities.training_handler import CrewTrainingHandler
+
+        data = CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).load()
+        if data and (trained_data_output := data.get(getattr(self, "role", "default"))):
+            task_prompt += (
+                "\n\nYou MUST follow these instructions: \n - "
+                + "\n - ".join(trained_data_output["suggestions"])
+            )
+        return task_prompt
+
+    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
+        """
+        Creates an agent executor using LangChain's AgentExecutor.
+        """
+        from importlib import import_module
+
+        langchain_agents = import_module("langchain.agents")
+        AgentExecutor = getattr(langchain_agents, "AgentExecutor")
+        used_tools = tools or self.tools or []
+
+        print(f"Creating agent executor for langchain agent: {self.langchain_agent}")
+        print("Passing tools: ", used_tools)
+        self.agent_executor = AgentExecutor.from_agent_and_tools(
+            agent=self.langchain_agent,
+            tools=used_tools,
+            verbose=getattr(self, "verbose", True),
+        )
+        print("Created agent executor for langchain agent")
+
+    def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
+        return tools
+
+    def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
+        return []
+
+    def get_output_converter(
+        self,
+        llm: Any,
+        text: str,
+        model: Optional[Type] = None,
+        instructions: str = "",
+    ) -> Converter:
+        return Converter(llm=llm, text=text, model=model, instructions=instructions)
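A rough usage sketch for the new adapter, assuming an already-constructed LangChain agent object that exposes invoke() (here called my_langchain_agent; it, the role/goal/backstory strings, and the task text are placeholders, not part of the commit):

    from crewai import Crew, Task
    from crewai.agents.langchain_agent_adapter import LangChainAgentAdapter

    # my_langchain_agent is assumed to be a prebuilt LangChain runnable agent.
    adapter = LangChainAgentAdapter(
        langchain_agent=my_langchain_agent,
        role="Researcher",
        goal="Answer research questions with the wrapped LangChain agent",
        backstory="A LangChain agent exposed through CrewAI's BaseAgent interface",
    )

    task = Task(
        description="Summarize the three most relevant findings on the topic",
        expected_output="A short bullet-point summary",
        agent=adapter,
        human_input=False,  # set True to exercise the feedback loop added above
    )

    crew = Crew(agents=[adapter], tasks=[task])
    result = crew.kickoff()
    print(result)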
@@ -455,8 +455,6 @@ class Crew(BaseModel):
         )
         return self

-
-
     @property
     def key(self) -> str:
         source = [agent.key for agent in self.agents] + [
@@ -928,13 +926,13 @@ class Crew(BaseModel):
     def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
         if not task_outputs:
             raise ValueError("No task outputs available to create crew output.")

         # Filter out empty outputs and get the last valid one as the main output
         valid_outputs = [t for t in task_outputs if t.raw]
         if not valid_outputs:
             raise ValueError("No valid task outputs available to create crew output.")
         final_task_output = valid_outputs[-1]

         final_string_output = final_task_output.raw
         self._finish_execution(final_string_output)
         token_usage = self.calculate_usage_metrics()