mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-24 23:58:15 +00:00
chore: merge main into release/v1.0.0
Resolved merge conflicts:
- agent.py: kept main's docker validation, restored get_platform_tools method, fixed KnowledgeRetrievalStartedEvent position
- agent_utils.py: merged both LLM and LiteAgent imports
- test_cache_hitting.yaml: accepted main's test recordings
- Removed 4 deprecated test cassettes from old tests/ directory

Key changes from main:
- Enhanced knowledge event handling with from_agent/from_task parameters
- Updated LLMMessage typing in kickoff methods
- Added guardrail parameter to async kickoff
- Restored runtime validation in guardrail.py
@@ -5,7 +5,6 @@ from collections.abc import Callable, Sequence
 from typing import (
     Any,
     Literal,
-    cast,
 )

 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -54,6 +53,7 @@ from crewai.utilities.converter import generate_model_description
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
+from crewai.utilities.types import LLMMessage


 class Agent(BaseAgent):
@@ -348,12 +348,13 @@ class Agent(BaseAgent):
         crewai_event_bus.emit(
             self,
             event=KnowledgeRetrievalStartedEvent(
                 agent=self,
+                from_task=task,
+                from_agent=self,
             ),
         )
         try:
             self.knowledge_search_query = self._get_knowledge_search_query(
-                task_prompt
+                task_prompt, task
             )
             if self.knowledge_search_query:
                 # Querying agent-specific knowledge
@@ -383,7 +384,8 @@ class Agent(BaseAgent):
             self,
             event=KnowledgeRetrievalCompletedEvent(
                 query=self.knowledge_search_query,
                 agent=self,
+                from_task=task,
+                from_agent=self,
                 retrieved_knowledge=(
                     (self.agent_knowledge_context or "")
                     + (
@@ -401,8 +403,9 @@ class Agent(BaseAgent):
             self,
             event=KnowledgeSearchQueryFailedEvent(
                 query=self.knowledge_search_query or "",
                 agent=self,
                 error=str(e),
+                from_task=task,
+                from_agent=self,
             ),
         )
@@ -577,7 +580,7 @@ class Agent(BaseAgent):
             agent=self,
             crew=self.crew,
             tools=parsed_tools,
-            prompt=cast(dict[str, str], prompt),
+            prompt=prompt,  # type: ignore[arg-type]
             original_tools=raw_tools,
             stop_words=stop_words,
             max_iter=self.max_iter,
@@ -744,13 +747,14 @@ class Agent(BaseAgent):
     def set_fingerprint(self, fingerprint: Fingerprint):
         self.security_config.fingerprint = fingerprint

-    def _get_knowledge_search_query(self, task_prompt: str) -> str | None:
+    def _get_knowledge_search_query(self, task_prompt: str, task: Task) -> str | None:
         """Generate a search query for the knowledge base based on the task description."""
         crewai_event_bus.emit(
             self,
             event=KnowledgeQueryStartedEvent(
                 task_prompt=task_prompt,
                 agent=self,
+                from_task=task,
+                from_agent=self,
             ),
         )
         query = self.i18n.slice("knowledge_search_query").format(
@@ -765,8 +769,9 @@ class Agent(BaseAgent):
             crewai_event_bus.emit(
                 self,
                 event=KnowledgeQueryFailedEvent(
                     agent=self,
                     error="LLM is not compatible with knowledge search queries",
+                    from_task=task,
+                    from_agent=self,
                 ),
             )
             return None
@@ -785,7 +790,8 @@ class Agent(BaseAgent):
             self,
             event=KnowledgeQueryCompletedEvent(
                 query=query,
                 agent=self,
+                from_task=task,
+                from_agent=self,
             ),
         )
         return rewritten_query
@@ -793,15 +799,16 @@ class Agent(BaseAgent):
             crewai_event_bus.emit(
                 self,
                 event=KnowledgeQueryFailedEvent(
                     agent=self,
                     error=str(e),
+                    from_task=task,
+                    from_agent=self,
                 ),
             )
             return None

     def kickoff(
         self,
-        messages: str | list[dict[str, str]],
+        messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
     ) -> LiteAgentOutput:
         """
@@ -841,7 +848,7 @@ class Agent(BaseAgent):

     async def kickoff_async(
         self,
-        messages: str | list[dict[str, str]],
+        messages: str | list[LLMMessage],
         response_format: type[Any] | None = None,
     ) -> LiteAgentOutput:
         """
@@ -871,6 +878,7 @@ class Agent(BaseAgent):
             response_format=response_format,
             i18n=self.i18n,
             original_agent=self,
+            guardrail=self.guardrail,
         )

         return await lite_agent.kickoff_async(messages)
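For illustration, a minimal usage sketch of the retyped kickoff entry points; it assumes LLMMessage is the role/content mapping from crewai.utilities.types, that Agent accepts these constructor arguments, and that LiteAgentOutput exposes a raw field. Names and values are hypothetical.

# Hypothetical sketch: calling the retyped kickoff with a list of LLMMessage
# dicts instead of plain dict[str, str] (assumes LLMMessage is a role/content
# mapping and that LiteAgentOutput exposes a `raw` field).
from crewai import Agent

agent = Agent(
    role="Research Assistant",
    goal="Answer questions about the codebase",
    backstory="Knows the repository layout well.",
)

messages = [
    {"role": "system", "content": "Be concise."},
    {"role": "user", "content": "What does the knowledge retrieval step do?"},
]

output = agent.kickoff(messages)  # returns a LiteAgentOutput
print(output.raw)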
@@ -32,6 +32,13 @@ from crewai.events.types.flow_events import (
     MethodExecutionFinishedEvent,
     MethodExecutionStartedEvent,
 )
+from crewai.events.types.knowledge_events import (
+    KnowledgeQueryCompletedEvent,
+    KnowledgeQueryFailedEvent,
+    KnowledgeQueryStartedEvent,
+    KnowledgeRetrievalCompletedEvent,
+    KnowledgeRetrievalStartedEvent,
+)
 from crewai.events.types.llm_events import (
     LLMCallCompletedEvent,
     LLMCallFailedEvent,
@@ -310,6 +317,26 @@ class TraceCollectionListener(BaseEventListener):
         def on_agent_reasoning_failed(source, event):
             self._handle_action_event("agent_reasoning_failed", source, event)

+        @event_bus.on(KnowledgeRetrievalStartedEvent)
+        def on_knowledge_retrieval_started(source, event):
+            self._handle_action_event("knowledge_retrieval_started", source, event)
+
+        @event_bus.on(KnowledgeRetrievalCompletedEvent)
+        def on_knowledge_retrieval_completed(source, event):
+            self._handle_action_event("knowledge_retrieval_completed", source, event)
+
+        @event_bus.on(KnowledgeQueryStartedEvent)
+        def on_knowledge_query_started(source, event):
+            self._handle_action_event("knowledge_query_started", source, event)
+
+        @event_bus.on(KnowledgeQueryCompletedEvent)
+        def on_knowledge_query_completed(source, event):
+            self._handle_action_event("knowledge_query_completed", source, event)
+
+        @event_bus.on(KnowledgeQueryFailedEvent)
+        def on_knowledge_query_failed(source, event):
+            self._handle_action_event("knowledge_query_failed", source, event)
+
     def _initialize_crew_batch(self, source: Any, event: Any):
         """Initialize trace batch"""
         user_context = self._get_user_context()
@@ -1,51 +1,60 @@
 from crewai.agents.agent_builder.base_agent import BaseAgent
+from typing import Any

 from crewai.events.base_events import BaseEvent


-class KnowledgeRetrievalStartedEvent(BaseEvent):
+class KnowledgeEventBase(BaseEvent):
+    task_id: str | None = None
+    task_name: str | None = None
+    from_task: Any | None = None
+    from_agent: Any | None = None
+    agent_role: str | None = None
+    agent_id: str | None = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        self._set_agent_params(data)
+        self._set_task_params(data)
+
+
+class KnowledgeRetrievalStartedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge retrieval is started."""

     type: str = "knowledge_search_query_started"
     agent: BaseAgent


-class KnowledgeRetrievalCompletedEvent(BaseEvent):
+class KnowledgeRetrievalCompletedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge retrieval is completed."""

     query: str
     type: str = "knowledge_search_query_completed"
     agent: BaseAgent
     retrieved_knowledge: str


-class KnowledgeQueryStartedEvent(BaseEvent):
+class KnowledgeQueryStartedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge query is started."""

     task_prompt: str
     type: str = "knowledge_query_started"
     agent: BaseAgent


-class KnowledgeQueryFailedEvent(BaseEvent):
+class KnowledgeQueryFailedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge query fails."""

     type: str = "knowledge_query_failed"
     agent: BaseAgent
     error: str


-class KnowledgeQueryCompletedEvent(BaseEvent):
+class KnowledgeQueryCompletedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge query is completed."""

     query: str
     type: str = "knowledge_query_completed"
     agent: BaseAgent


-class KnowledgeSearchQueryFailedEvent(BaseEvent):
+class KnowledgeSearchQueryFailedEvent(KnowledgeEventBase):
     """Event emitted when a knowledge search query fails."""

     query: str
     type: str = "knowledge_search_query_failed"
     agent: BaseAgent
     error: str
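Illustrative sketch of consuming the new shared fields on these events; the event-bus import path is an assumption, and the handler body is hypothetical.

# Hypothetical listener: the KnowledgeEventBase fields (task_id, task_name,
# agent_role, agent_id, from_task, from_agent) are filled from the
# from_task/from_agent values passed by the emitter, and may be None.
from crewai.events.event_bus import crewai_event_bus  # assumed import path
from crewai.events.types.knowledge_events import KnowledgeRetrievalCompletedEvent


@crewai_event_bus.on(KnowledgeRetrievalCompletedEvent)
def log_retrieval(source, event):
    print(f"[{event.agent_role}] retrieval done for task {event.task_name!r}")
    print(f"query: {event.query}")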
@@ -5,7 +5,21 @@ from typing import Any
 from crewai.events.base_events import BaseEvent


-class LLMGuardrailStartedEvent(BaseEvent):
+class LLMGuardrailBaseEvent(BaseEvent):
+    task_id: str | None = None
+    task_name: str | None = None
+    from_task: Any | None = None
+    from_agent: Any | None = None
+    agent_role: str | None = None
+    agent_id: str | None = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        self._set_agent_params(data)
+        self._set_task_params(data)
+
+
+class LLMGuardrailStartedEvent(LLMGuardrailBaseEvent):
     """Event emitted when a guardrail task starts

     Attributes:
@@ -29,7 +43,7 @@ class LLMGuardrailStartedEvent(BaseEvent):
             self.guardrail = getsource(self.guardrail).strip()


-class LLMGuardrailCompletedEvent(BaseEvent):
+class LLMGuardrailCompletedEvent(LLMGuardrailBaseEvent):
     """Event emitted when a guardrail task completes

     Attributes:
@@ -44,3 +58,16 @@ class LLMGuardrailCompletedEvent(BaseEvent):
     result: Any
     error: str | None = None
     retry_count: int
+
+
+class LLMGuardrailFailedEvent(LLMGuardrailBaseEvent):
+    """Event emitted when a guardrail task fails
+
+    Attributes:
+        error: The error message
+        retry_count: The number of times the guardrail has been retried
+    """
+
+    type: str = "llm_guardrail_failed"
+    error: str
+    retry_count: int
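The getsource(...).strip() call retained in LLMGuardrailStartedEvent stores a callable guardrail as its source text. A standalone sketch of that idea, with a hypothetical guardrail function:

# Standalone sketch: serializing a guardrail callable to its source text,
# mirroring the getsource(...).strip() pattern above (function is hypothetical).
from inspect import getsource


def no_placeholder_text(output: str) -> tuple[bool, str]:
    """Toy guardrail: reject outputs that still contain 'TODO'."""
    return ("TODO" not in output, output)


print(getsource(no_placeholder_text).strip())  # prints the function's own source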
@@ -1377,7 +1377,7 @@ class ConsoleFormatter:
         if isinstance(formatted_answer, AgentAction):
             thought = re.sub(r"\n+", "\n", formatted_answer.thought)
             formatted_json = json.dumps(
-                formatted_answer.tool_input,
+                json.loads(formatted_answer.tool_input),
                 indent=2,
                 ensure_ascii=False,
            )
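The change above parses tool_input before dumping it, so the console shows indented JSON rather than one re-escaped string. A standalone illustration with a hypothetical tool_input value:

import json

tool_input = '{"query": "vector search", "limit": 5}'  # hypothetical JSON string

# Dumping the raw string re-escapes it into a single quoted value:
print(json.dumps(tool_input, indent=2, ensure_ascii=False))
# "{\"query\": \"vector search\", \"limit\": 5}"

# Parsing first yields readable, indented JSON (json.loads raises ValueError
# if tool_input is not valid JSON):
print(json.dumps(json.loads(tool_input), indent=2, ensure_ascii=False))
# {
#   "query": "vector search",
#   "limit": 5
# }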
@@ -3,6 +3,7 @@ from collections.abc import Callable
 import inspect
 from typing import (
     Any,
     Literal,
     cast,
     get_args,
     get_origin,
@@ -62,6 +63,7 @@ from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.tool_utils import execute_tool_and_check_finality
+from crewai.utilities.types import LLMMessage


 class LiteAgentOutput(BaseModel):
@@ -180,7 +182,7 @@ class LiteAgent(FlowTrackable, BaseModel):
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
     _key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
-    _messages: list[dict[str, str]] = PrivateAttr(default_factory=list)
+    _messages: list[LLMMessage] = PrivateAttr(default_factory=list)
     _iterations: int = PrivateAttr(default=0)
     _printer: Printer = PrivateAttr(default_factory=Printer)
     _guardrail: Callable | None = PrivateAttr(default=None)
@@ -219,7 +221,6 @@ class LiteAgent(FlowTrackable, BaseModel):
             raise TypeError(
                 f"Guardrail requires LLM instance of type BaseLLM, got {type(self.llm).__name__}"
             )

         self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)

         return self
@@ -276,7 +277,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Return the original role for compatibility with tool interfaces."""
         return self.role

-    def kickoff(self, messages: str | list[dict[str, str]]) -> LiteAgentOutput:
+    def kickoff(self, messages: str | list[LLMMessage]) -> LiteAgentOutput:
         """
         Execute the agent with the given messages.

@@ -371,6 +372,7 @@ class LiteAgent(FlowTrackable, BaseModel):
             guardrail=self._guardrail,
             retry_count=self._guardrail_retry_count,
             event_source=self,
+            from_agent=self,
         )

         if not guardrail_result.success:
@@ -420,9 +422,7 @@ class LiteAgent(FlowTrackable, BaseModel):

         return output

-    async def kickoff_async(
-        self, messages: str | list[dict[str, str]]
-    ) -> LiteAgentOutput:
+    async def kickoff_async(self, messages: str | list[LLMMessage]) -> LiteAgentOutput:
         """
         Execute the agent asynchronously with the given messages.

@@ -467,9 +467,7 @@ class LiteAgent(FlowTrackable, BaseModel):

         return base_prompt

-    def _format_messages(
-        self, messages: str | list[dict[str, str]]
-    ) -> list[dict[str, str]]:
+    def _format_messages(self, messages: str | list[LLMMessage]) -> list[LLMMessage]:
         """Format messages for the LLM."""
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
@@ -477,7 +475,9 @@ class LiteAgent(FlowTrackable, BaseModel):
         system_prompt = self._get_default_system_prompt()

         # Add system message at the beginning
-        formatted_messages = [{"role": "system", "content": system_prompt}]
+        formatted_messages: list[LLMMessage] = [
+            {"role": "system", "content": system_prompt}
+        ]

         # Add the rest of the messages
         formatted_messages.extend(messages)
@@ -589,6 +589,8 @@ class LiteAgent(FlowTrackable, BaseModel):
             ),
         )

-    def _append_message(self, text: str, role: str = "assistant") -> None:
+    def _append_message(
+        self, text: str, role: Literal["user", "assistant", "system"] = "assistant"
+    ) -> None:
         """Append a message to the message list with the given role."""
-        self._messages.append(format_message_for_llm(text, role=role))
+        self._messages.append(cast(LLMMessage, format_message_for_llm(text, role=role)))
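For illustration, the normalization performed by _format_messages can be sketched standalone; LLMMessage is approximated here as a TypedDict, which is an assumption about its real definition in crewai.utilities.types.

# Standalone sketch of LiteAgent._format_messages-style normalization:
# a bare string becomes a user message and a system prompt is prepended.
from typing import Literal, TypedDict


class LLMMessage(TypedDict):  # assumed shape; real type lives in crewai.utilities.types
    role: Literal["user", "assistant", "system"]
    content: str


def format_messages(messages: str | list[LLMMessage], system_prompt: str) -> list[LLMMessage]:
    if isinstance(messages, str):
        messages = [{"role": "user", "content": messages}]
    formatted: list[LLMMessage] = [{"role": "system", "content": system_prompt}]
    formatted.extend(messages)
    return formatted


print(format_messages("Summarize the diff", system_prompt="You are terse."))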
@@ -462,6 +462,8 @@ class Task(BaseModel):
             guardrail=self._guardrail,
             retry_count=self.retry_count,
             event_source=self,
+            from_task=self,
+            from_agent=agent,
         )
         if not guardrail_result.success:
             if self.retry_count >= self.guardrail_max_retries:
@@ -31,6 +31,7 @@ from crewai.utilities.types import LLMMessage

 if TYPE_CHECKING:
     from crewai.agent import Agent
+    from crewai.lite_agent import LiteAgent
     from crewai.llm import LLM
     from crewai.task import Task

@@ -224,7 +225,7 @@ def get_llm_response(
     callbacks: list[Callable[..., Any]],
     printer: Printer,
     from_task: Task | None = None,
-    from_agent: Agent | None = None,
+    from_agent: Agent | LiteAgent | None = None,
 ) -> str:
     """Call the LLM and return the response, handling any invalid responses.
@@ -14,6 +14,7 @@ from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser

 if TYPE_CHECKING:
     from crewai.agent import Agent
+    from crewai.agents.agent_builder.base_agent import BaseAgent
     from crewai.llm import LLM
     from crewai.llms.base_llm import BaseLLM

@@ -143,7 +144,7 @@ def convert_to_model(
     result: str,
     output_pydantic: type[BaseModel] | None,
     output_json: type[BaseModel] | None,
-    agent: Agent | None = None,
+    agent: Agent | BaseAgent | None = None,
     converter_cls: type[Converter] | None = None,
 ) -> dict[str, Any] | BaseModel | str:
     """Convert a result string to a Pydantic model or JSON.
@@ -215,7 +216,7 @@ def handle_partial_json(
     result: str,
     model: type[BaseModel],
     is_json_output: bool,
-    agent: Agent | None,
+    agent: Agent | BaseAgent | None,
     converter_cls: type[Converter] | None = None,
 ) -> dict[str, Any] | BaseModel | str:
     """Handle partial JSON in a result string and convert to Pydantic model or dict.
@@ -260,7 +261,7 @@ def convert_with_instructions(
     result: str,
     model: type[BaseModel],
     is_json_output: bool,
-    agent: Agent | None,
+    agent: Agent | BaseAgent | None,
     converter_cls: type[Converter] | None = None,
 ) -> dict | BaseModel | str:
     """Convert a result string to a Pydantic model or JSON using instructions.
@@ -283,7 +284,12 @@ def convert_with_instructions(
     """
     if agent is None:
         raise TypeError("Agent must be provided if converter_cls is not specified.")
-    llm = agent.function_calling_llm or agent.llm
+
+    llm = getattr(agent, "function_calling_llm", None) or agent.llm
+
+    if llm is None:
+        raise ValueError("Agent must have a valid LLM instance for conversion")
+
     instructions = get_conversion_instructions(model=model, llm=llm)
     converter = create_converter(
         agent=agent,
@@ -299,7 +305,7 @@ def convert_with_instructions(

     if isinstance(exported_result, ConverterError):
         Printer().print(
-            content=f"{exported_result.message} Using raw output instead.",
+            content=f"Failed to convert result to model: {exported_result}",
             color="red",
         )
         return result
@@ -308,7 +314,7 @@ def convert_with_instructions(


 def get_conversion_instructions(
-    model: type[BaseModel], llm: BaseLLM | LLM | str
+    model: type[BaseModel], llm: BaseLLM | LLM | str | Any
 ) -> str:
     """Generate conversion instructions based on the model and LLM capabilities.

@@ -357,7 +363,7 @@ class CreateConverterKwargs(TypedDict, total=False):


 def create_converter(
-    agent: Agent | None = None,
+    agent: Agent | BaseAgent | None = None,
     converter_cls: type[Converter] | None = None,
     *args: Any,
     **kwargs: Unpack[CreateConverterKwargs],
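The getattr fallback above lets convert_with_instructions accept any BaseAgent, which may not define function_calling_llm. A small sketch of the difference, with hypothetical stand-in classes:

# Hypothetical stand-ins: a full agent exposing function_calling_llm and a
# minimal BaseAgent-like object that does not.
class FullAgent:
    function_calling_llm = None
    llm = "gpt-4o-mini"


class MinimalAgent:
    llm = "gpt-4o-mini"


for agent in (FullAgent(), MinimalAgent()):
    # Direct access (agent.function_calling_llm or agent.llm) would raise
    # AttributeError for MinimalAgent; getattr with a default falls back to agent.llm.
    llm = getattr(agent, "function_calling_llm", None) or agent.llm
    print(type(agent).__name__, "->", llm)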
@@ -7,7 +7,9 @@ from pydantic import BaseModel, Field, field_validator
 from typing_extensions import Self

 if TYPE_CHECKING:
-    from crewai.lite_agent import LiteAgentOutput
+    from crewai.agents.agent_builder.base_agent import BaseAgent
+    from crewai.lite_agent import LiteAgent, LiteAgentOutput
+    from crewai.task import Task
     from crewai.tasks.task_output import TaskOutput


@@ -79,6 +81,8 @@ def process_guardrail(
     guardrail: Callable[[Any], tuple[bool, Any | str]],
     retry_count: int,
     event_source: Any | None = None,
+    from_agent: BaseAgent | LiteAgent | None = None,
+    from_task: Task | None = None,
 ) -> GuardrailResult:
     """Process the guardrail for the agent output.

@@ -111,7 +115,12 @@ def process_guardrail(

     crewai_event_bus.emit(
         event_source,
-        LLMGuardrailStartedEvent(guardrail=guardrail, retry_count=retry_count),
+        LLMGuardrailStartedEvent(
+            guardrail=guardrail,
+            retry_count=retry_count,
+            from_agent=from_agent,
+            from_task=from_task,
+        ),
     )

     result = guardrail(output)
@@ -124,6 +133,8 @@ def process_guardrail(
             result=guardrail_result.result,
             error=guardrail_result.error,
             retry_count=retry_count,
+            from_agent=from_agent,
+            from_task=from_task,
         ),
     )

|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent import Agent
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.task import Task
|
||||
@@ -25,7 +26,7 @@ def execute_tool_and_check_finality(
|
||||
agent_role: str | None = None,
|
||||
tools_handler: ToolsHandler | None = None,
|
||||
task: Task | None = None,
|
||||
agent: Agent | None = None,
|
||||
agent: Agent | BaseAgent | None = None,
|
||||
function_calling_llm: BaseLLM | LLM | None = None,
|
||||
fingerprint_context: dict[str, str] | None = None,
|
||||
) -> ToolResult:
|
||||
|
||||