Merge branch 'main' into devin/1740069574-improve-guardrail-validation
@@ -114,10 +114,15 @@ class CrewAgentExecutorMixin:
         prompt = (
             "\n\n=====\n"
             "## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
-            "Respond with 'looks good' to accept or provide specific improvement requests.\n"
-            "You can provide multiple rounds of feedback until satisfied.\n"
+            "Please follow these guidelines:\n"
+            " - If you are happy with the result, simply hit Enter without typing anything.\n"
+            " - Otherwise, provide specific improvement requests.\n"
+            " - You can provide multiple rounds of feedback until satisfied.\n"
             "=====\n"
         )
 
         self._printer.print(content=prompt, color="bold_yellow")
-        return input()
+        response = input()
+        if response.strip() != "":
+            self._printer.print(content="\nProcessing your feedback...", color="cyan")
+        return response
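Note: the reworked prompt makes a blank reply the accept path, replacing the old "respond with 'looks good'" convention. A minimal standalone sketch of the new flow (a hypothetical ask_human_input helper, with plain print/input standing in for the mixin's _printer):

    def ask_human_input() -> str:
        # Print the feedback guidelines, then treat a blank reply as acceptance.
        print(
            "\n\n=====\n"
            "## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
            "Please follow these guidelines:\n"
            " - If you are happy with the result, simply hit Enter without typing anything.\n"
            " - Otherwise, provide specific improvement requests.\n"
            " - You can provide multiple rounds of feedback until satisfied.\n"
            "=====\n"
        )
        response = input()
        if response.strip() != "":
            print("\nProcessing your feedback...")
        return response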
@@ -31,11 +31,11 @@ class OutputConverter(BaseModel, ABC):
     )
 
     @abstractmethod
-    def to_pydantic(self, current_attempt=1):
+    def to_pydantic(self, current_attempt=1) -> BaseModel:
         """Convert text to pydantic."""
         pass
 
     @abstractmethod
-    def to_json(self, current_attempt=1):
+    def to_json(self, current_attempt=1) -> dict:
         """Convert text to json."""
         pass
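These annotations only tighten the abstract contract; implementations are unchanged but now type-check against the declared returns. A minimal sketch of a conforming subclass (hypothetical Point/PointConverter names, with a trimmed stand-in for the real base class):

    from abc import ABC, abstractmethod

    from pydantic import BaseModel


    class OutputConverterSketch(ABC):
        """Trimmed stand-in for OutputConverter (illustration only)."""

        @abstractmethod
        def to_pydantic(self, current_attempt=1) -> BaseModel: ...

        @abstractmethod
        def to_json(self, current_attempt=1) -> dict: ...


    class Point(BaseModel):
        x: int
        y: int


    class PointConverter(OutputConverterSketch):
        def to_pydantic(self, current_attempt=1) -> BaseModel:
            return Point(x=1, y=2)

        def to_json(self, current_attempt=1) -> dict:
            return self.to_pydantic(current_attempt).model_dump()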
@@ -548,10 +548,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self, initial_answer: AgentFinish, feedback: str
     ) -> AgentFinish:
         """Process feedback for training scenarios with single iteration."""
-        self._printer.print(
-            content="\nProcessing training feedback.\n",
-            color="yellow",
-        )
         self._handle_crew_training_output(initial_answer, feedback)
         self.messages.append(
             self._format_msg(
@@ -571,9 +567,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         answer = current_answer
 
         while self.ask_for_human_input:
-            response = self._get_llm_feedback_response(feedback)
-
-            if not self._feedback_requires_changes(response):
+            # If the user provides a blank response, assume they are happy with the result
+            if feedback.strip() == "":
                 self.ask_for_human_input = False
             else:
                 answer = self._process_feedback_iteration(feedback)
@@ -581,27 +576,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
 
         return answer
 
-    def _get_llm_feedback_response(self, feedback: str) -> Optional[str]:
-        """Get LLM classification of whether feedback requires changes."""
-        prompt = self._i18n.slice("human_feedback_classification").format(
-            feedback=feedback
-        )
-        message = self._format_msg(prompt, role="system")
-
-        for retry in range(MAX_LLM_RETRY):
-            try:
-                response = self.llm.call([message], callbacks=self.callbacks)
-                return response.strip().lower() if response else None
-            except Exception as error:
-                self._log_feedback_error(retry, error)
-
-        self._log_max_retries_exceeded()
-        return None
-
-    def _feedback_requires_changes(self, response: Optional[str]) -> bool:
-        """Determine if feedback response indicates need for changes."""
-        return response == "true" if response else False
-
     def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
         """Process a single feedback iteration."""
         self.messages.append(
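Net effect of the two hunks above: the executor no longer round-trips each piece of feedback through the LLM to classify it; an empty string ends the loop, anything else triggers another revision. A self-contained sketch of the simplified control flow (hypothetical names, mirroring the diff's logic only):

    def run_feedback_loop(initial_answer: str, get_feedback=input) -> str:
        # Stand-in for the executor's human-feedback loop after this change.
        answer = initial_answer
        ask_for_human_input = True
        while ask_for_human_input:
            feedback = get_feedback()
            if feedback.strip() == "":
                # Blank response: assume the user is happy with the result
                ask_for_human_input = False
            else:
                # Non-blank response: revise and ask again
                answer = f"{answer}\n[revised per feedback: {feedback}]"
        return answer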
@@ -26,9 +26,9 @@ from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
 with warnings.catch_warnings():
     warnings.simplefilter("ignore", UserWarning)
     import litellm
-    from litellm import Choices, get_supported_openai_params
+    from litellm import Choices
     from litellm.types.utils import ModelResponse
-    from litellm.utils import supports_response_schema
+    from litellm.utils import get_supported_openai_params, supports_response_schema
 
 
 from crewai.traces.unified_trace_controller import trace_llm_call
@@ -449,7 +449,7 @@ class LLM:
     def supports_function_calling(self) -> bool:
         try:
             params = get_supported_openai_params(model=self.model)
-            return "response_format" in params
+            return params is not None and "tools" in params
         except Exception as e:
             logging.error(f"Failed to get supported params: {str(e)}")
             return False
@@ -457,7 +457,7 @@
     def supports_stop_words(self) -> bool:
         try:
             params = get_supported_openai_params(model=self.model)
-            return "stop" in params
+            return params is not None and "stop" in params
         except Exception as e:
             logging.error(f"Failed to get supported params: {str(e)}")
             return False
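Two fixes travel together here: get_supported_openai_params now comes from litellm.utils (matching the import hunk above), and it can return None for models litellm does not recognize, so both probes guard before the membership test; supports_function_calling also checks for "tools" rather than "response_format", the parameter that actually gates tool use. A hedged sketch of the shared pattern (standalone function, example model names):

    import logging

    from litellm.utils import get_supported_openai_params


    def model_supports(model: str, param: str) -> bool:
        # Guarded probe: the lookup returns None for unknown models, and
        # `param in None` would raise a TypeError.
        try:
            params = get_supported_openai_params(model=model)
            return params is not None and param in params
        except Exception as e:
            logging.error(f"Failed to get supported params: {str(e)}")
            return False


    # Mirrors the two hunks above (example model name):
    # model_supports("gpt-4o", "tools")  -> function calling
    # model_supports("gpt-4o", "stop")   -> stop words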
@@ -23,7 +23,6 @@
     "summary": "This is a summary of our conversation so far:\n{merged_summary}",
     "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
     "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
-    "human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\"",
     "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
     "feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary."
   },
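The removed "human_feedback_classification" slice was the prompt consumed by the _get_llm_feedback_response helper deleted above; with the classifier gone, the catalog entry goes with it, keeping the i18n file and the executor in sync.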
@@ -20,11 +20,11 @@ class ConverterError(Exception):
 class Converter(OutputConverter):
     """Class that converts text into either pydantic or json."""
 
-    def to_pydantic(self, current_attempt=1):
+    def to_pydantic(self, current_attempt=1) -> BaseModel:
         """Convert text to pydantic."""
         try:
             if self.llm.supports_function_calling():
-                return self._create_instructor().to_pydantic()
+                result = self._create_instructor().to_pydantic()
             else:
                 response = self.llm.call(
                     [
@@ -32,18 +32,40 @@ class Converter(OutputConverter):
                         {"role": "user", "content": self.text},
                     ]
                 )
-                return self.model.model_validate_json(response)
+                try:
+                    # Try to directly validate the response JSON
+                    result = self.model.model_validate_json(response)
+                except ValidationError:
+                    # If direct validation fails, attempt to extract valid JSON
+                    result = handle_partial_json(response, self.model, False, None)
+            # Ensure result is a BaseModel instance
+            if not isinstance(result, BaseModel):
+                if isinstance(result, dict):
+                    result = self.model.parse_obj(result)
+                elif isinstance(result, str):
+                    try:
+                        parsed = json.loads(result)
+                        result = self.model.parse_obj(parsed)
+                    except Exception as parse_err:
+                        raise ConverterError(
+                            f"Failed to convert partial JSON result into Pydantic: {parse_err}"
+                        )
+                else:
+                    raise ConverterError(
+                        "handle_partial_json returned an unexpected type."
+                    )
+            return result
         except ValidationError as e:
             if current_attempt < self.max_attempts:
                 return self.to_pydantic(current_attempt + 1)
             raise ConverterError(
-                f"Failed to convert text into a Pydantic model due to the following validation error: {e}"
+                f"Failed to convert text into a Pydantic model due to validation error: {e}"
             )
         except Exception as e:
             if current_attempt < self.max_attempts:
                 return self.to_pydantic(current_attempt + 1)
             raise ConverterError(
-                f"Failed to convert text into a Pydantic model due to the following error: {e}"
+                f"Failed to convert text into a Pydantic model due to error: {e}"
             )
 
     def to_json(self, current_attempt=1):
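The fallback chain is the heart of the guardrail change: validate the raw response first, fall back to partial-JSON extraction, then coerce whatever comes back into a model instance. A standalone sketch of the same chain using plain pydantic (the hypothetical extract_json stands in for crewai's handle_partial_json, and the sketch uses v2's model_validate where the diff keeps the older parse_obj):

    import json
    import re

    from pydantic import BaseModel, ValidationError


    class Answer(BaseModel):
        verdict: str
        score: int


    def extract_json(text: str):
        # Stand-in for handle_partial_json: pull the first {...} block
        # out of noisy LLM output, or give the text back unchanged.
        match = re.search(r"\{.*\}", text, re.DOTALL)
        return match.group(0) if match else text


    def to_answer(response: str) -> Answer:
        try:
            # Try to directly validate the response JSON
            result = Answer.model_validate_json(response)
        except ValidationError:
            # If direct validation fails, attempt to extract valid JSON
            result = extract_json(response)
        # Ensure result is a model instance, as in the diff's coercion step
        if not isinstance(result, Answer):
            if isinstance(result, dict):
                result = Answer.model_validate(result)
            elif isinstance(result, str):
                result = Answer.model_validate(json.loads(result))
        return result


    print(to_answer('Sure! Here it is: {"verdict": "pass", "score": 9}'))
    # -> verdict='pass' score=9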
@@ -197,11 +219,15 @@ def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
     if llm.supports_function_calling():
         model_schema = PydanticSchemaParser(model=model).get_schema()
         instructions += (
-            f"\n\nThe JSON should follow this schema:\n```json\n{model_schema}\n```"
+            f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
+            f"The JSON must follow this schema exactly:\n```json\n{model_schema}\n```"
         )
     else:
         model_description = generate_model_description(model)
-        instructions += f"\n\nThe JSON should follow this format:\n{model_description}"
+        instructions += (
+            f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
+            f"The JSON must follow this format exactly:\n{model_description}"
+        )
     return instructions
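For reference, the strengthened wording yields prompts like the one below. A small sketch (the base instruction line and schema string are placeholders; the real values come from the i18n catalog and PydanticSchemaParser or generate_model_description):

    model_schema = '{"title": "Point", "properties": {"x": {"type": "integer"}, "y": {"type": "integer"}}}'
    instructions = "Please convert the following text into valid JSON."  # placeholder base instruction
    instructions += (
        f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
        f"The JSON must follow this schema exactly:\n```json\n{model_schema}\n```"
    )
    print(instructions)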