It's alive!!

Brandon Hancock
2024-12-27 13:57:09 -05:00
parent 2f882d68ad
commit 0e7aa192c0
5 changed files with 75 additions and 24 deletions

View File

@@ -188,6 +188,8 @@ class Agent(BaseAgent):
         task_prompt = task.prompt()
+        print("task_prompt:", task_prompt)
 
         # If the task requires output in JSON or Pydantic format,
         # append specific instructions to the task prompt to ensure
         # that the final answer does not include any code block markers
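
For context, the comment above refers to the `formatted_task_instructions` slice (see the en.json diff at the end of this commit). A minimal sketch of the idea; the helper name and the plain string append are assumptions for illustration, not the actual Agent internals:

    from typing import Optional

    def append_format_instructions(task_prompt: str, output_format: Optional[str]) -> str:
        # Hypothetical helper: when a task declares JSON or Pydantic output,
        # append the formatting slice so the final answer arrives without
        # ```json / ```python fences around it.
        if output_format:
            task_prompt += (
                "\nEnsure your final answer contains only the content in the "
                f"following format: {output_format}\n\n"
                "Ensure the final output does not include any code block "
                "markers like ```json or ```python."
            )
        return task_prompt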

View File

@@ -1,3 +1,6 @@
+import json
+from typing import Any, Dict, List
+
 import click
 from crewai.cli.fetch_chat_llm import fetch_chat_llm
@@ -62,9 +65,13 @@ def run_chat():
click.secho("No valid Chat LLM returned. Exiting.", fg="red")
return
# 5) Prepare available_functions for the callback dictionary
# Create a wrapper function that captures 'messages' from the enclosing scope
def run_crew_tool_with_messages(**kwargs):
return run_crew_tool(messages, **kwargs)
# 5) Prepare available_functions with the wrapper function
available_functions = {
crew_inputs.crew_name: run_crew_tool, # The LLM can call run_crew_tool using the crew's name
crew_inputs.crew_name: run_crew_tool_with_messages,
}
click.secho(
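
The wrapper is a plain closure: it pre-binds `messages` from `run_chat`'s scope while keeping the LLM-facing callable's signature as `**kwargs` only. `functools.partial` would express the same binding; sketched here as a hedged alternative, not what the commit does:

    from functools import partial

    # Equivalent pre-binding of `messages`; the LLM still calls the tool
    # with keyword arguments only, and `messages` arrives positionally.
    available_functions = {
        crew_inputs.crew_name: partial(run_crew_tool, messages),
    }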
@@ -134,11 +141,12 @@ def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
     }
 
 
-def run_crew_tool(**kwargs) -> str:
+def run_crew_tool(messages: List[Dict[str, str]], **kwargs: Any) -> str:
     """
     Subprocess-based function that:
     1) Calls 'uv run run_crew' (which in turn calls your crew's 'run()' in main.py)
     2) Passes the LLM-provided kwargs as CLI overrides (e.g. --key=value).
+    3) Also takes in messages from the main chat loop and passes them to the command.
     """
     import subprocess
@@ -149,9 +157,13 @@ def run_crew_tool(**kwargs) -> str:
         val_str = str(value)
         command.append(f"--{key}={val_str}")
 
+    # Serialize messages to JSON and add to command
+    messages_json = json.dumps(messages)
+    command.append(f"--crew_chat_messages={messages_json}")
+
     try:
         # Capture stdout so we can return it to the LLM
-        result = subprocess.run(command, capture_output=True, text=True, check=True)
+        result = subprocess.run(command, text=True, check=True)
         stdout_str = result.stdout.strip()
         return stdout_str if stdout_str else "No output from run_crew command."
     except subprocess.CalledProcessError as e:
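
Taken together, these hunks serialize the chat history to JSON and hand it to the crew process as a single CLI flag. One caveat worth hedging: with `capture_output=True` removed, `result.stdout` is `None` unless stdout is redirected elsewhere, so the `result.stdout.strip()` on the next line would raise `AttributeError`. A self-contained sketch of the pattern that keeps the capture; the `uv run run_crew` entry point and flag names come from the diff, everything else is illustrative:

    import json
    import subprocess
    from typing import Any, Dict, List

    def run_crew_tool(messages: List[Dict[str, str]], **kwargs: Any) -> str:
        command = ["uv", "run", "run_crew"]
        # LLM-provided kwargs become --key=value CLI overrides.
        for key, value in kwargs.items():
            command.append(f"--{key}={value}")
        # The whole chat history travels as one JSON-encoded flag.
        command.append(f"--crew_chat_messages={json.dumps(messages)}")
        # capture_output=True keeps result.stdout as a str we can return.
        result = subprocess.run(command, capture_output=True, text=True, check=True)
        stdout_str = result.stdout.strip()
        return stdout_str if stdout_str else "No output from run_crew command."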

View File

@@ -1047,7 +1047,7 @@ class Crew(BaseModel):
     def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
         """Interpolates the inputs in the tasks and agents."""
         [
-            task.interpolate_inputs(
+            task.interpolate_inputs_and_add_conversation_history(
                 # type: ignore # "interpolate_inputs" of "Task" does not return a value (it only ever returns None)
                 inputs
             )
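
For context, `_interpolate_inputs` fans the same inputs dict out to every task; the tail of the comprehension is truncated above and presumably reads `for task in self.tasks`. A hedged sketch of how the serialized history reaches it, with the input values purely illustrative:

    inputs = {
        "topic": "AI agents",
        # JSON string built by run_crew_tool in the previous file
        "crew_chat_messages": '[{"role": "user", "content": "Run the crew"}]',
    }
    result = crew.kickoff(inputs=inputs)  # interpolation happens on the way in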

View File

@@ -127,38 +127,34 @@ class Task(BaseModel):
     processed_by_agents: Set[str] = Field(default_factory=set)
     guardrail: Optional[Callable[[TaskOutput], Tuple[bool, Any]]] = Field(
         default=None,
-        description="Function to validate task output before proceeding to next task"
+        description="Function to validate task output before proceeding to next task",
     )
     max_retries: int = Field(
-        default=3,
-        description="Maximum number of retries when guardrail fails"
-    )
-    retry_count: int = Field(
-        default=0,
-        description="Current number of retries"
+        default=3, description="Maximum number of retries when guardrail fails"
     )
+    retry_count: int = Field(default=0, description="Current number of retries")
 
     @field_validator("guardrail")
     @classmethod
     def validate_guardrail_function(cls, v: Optional[Callable]) -> Optional[Callable]:
         """Validate that the guardrail function has the correct signature and behavior.
 
         While type hints provide static checking, this validator ensures runtime safety by:
         1. Verifying the function accepts exactly one parameter (the TaskOutput)
         2. Checking return type annotations match Tuple[bool, Any] if present
         3. Providing clear, immediate error messages for debugging
 
         This runtime validation is crucial because:
         - Type hints are optional and can be ignored at runtime
         - Function signatures need immediate validation before task execution
         - Clear error messages help users debug guardrail implementation issues
 
         Args:
             v: The guardrail function to validate
 
         Returns:
             The validated guardrail function
 
         Raises:
             ValueError: If the function signature is invalid or return annotation
                 doesn't match Tuple[bool, Any]
@@ -171,8 +167,13 @@ class Task(BaseModel):
         # Check return annotation if present, but don't require it
         return_annotation = sig.return_annotation
         if return_annotation != inspect.Signature.empty:
-            if not (return_annotation == Tuple[bool, Any] or str(return_annotation) == 'Tuple[bool, Any]'):
-                raise ValueError("If return type is annotated, it must be Tuple[bool, Any]")
+            if not (
+                return_annotation == Tuple[bool, Any]
+                or str(return_annotation) == "Tuple[bool, Any]"
+            ):
+                raise ValueError(
+                    "If return type is annotated, it must be Tuple[bool, Any]"
+                )
 
         return v
 
     _telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
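
A sketch of a guardrail that passes this validator: exactly one `TaskOutput` parameter and, if annotated at all, a `Tuple[bool, Any]` return. The JSON check itself is illustrative, and the `TaskOutput` import path is assumed from crewAI's layout:

    import json
    from typing import Any, Tuple

    from crewai.tasks.task_output import TaskOutput

    def validate_json_output(output: TaskOutput) -> Tuple[bool, Any]:
        # (True, parsed) lets the task proceed; (False, message) triggers
        # a retry, up to max_retries times.
        try:
            return True, json.loads(output.raw)
        except json.JSONDecodeError:
            return False, "Final answer must be valid JSON without code fences."

    # task = Task(..., guardrail=validate_json_output, max_retries=3)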
@@ -353,7 +354,9 @@ class Task(BaseModel):
             if isinstance(guardrail_result.result, str):
                 task_output.raw = guardrail_result.result
-                pydantic_output, json_output = self._export_output(guardrail_result.result)
+                pydantic_output, json_output = self._export_output(
+                    guardrail_result.result
+                )
                 task_output.pydantic = pydantic_output
                 task_output.json_dict = json_output
             elif isinstance(guardrail_result.result, TaskOutput):
@@ -393,7 +396,9 @@ class Task(BaseModel):
             tasks_slices = [self.description, output]
         return "\n".join(tasks_slices)
 
-    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
+    def interpolate_inputs_and_add_conversation_history(
+        self, inputs: Dict[str, Any]
+    ) -> None:
         """Interpolate inputs into the task description and expected output."""
         if self._original_description is None:
             self._original_description = self.description
@@ -406,6 +411,36 @@ class Task(BaseModel):
                 input_string=self._original_expected_output, inputs=inputs
             )
 
+        if "crew_chat_messages" in inputs and inputs["crew_chat_messages"]:
+            # Fetch the conversation history instruction using self.i18n.slice
+            conversation_instruction = self.i18n.slice(
+                "conversation_history_instruction"
+            )
+
+            print("crew_chat_messages:", inputs["crew_chat_messages"])
+
+            try:
+                crew_chat_messages = json.loads(inputs["crew_chat_messages"])
+                print("crew_chat_messages successfully parsed as a list")
+            except json.JSONDecodeError:
+                print("Failed to parse crew_chat_messages as JSON")
+                crew_chat_messages = []
+
+            # Debug print to check the input
+            print("crew_chat_messages input:", inputs["crew_chat_messages"])
+
+            conversation_history = "\n".join(
+                f"{msg['role'].capitalize()}: {msg['content']}"
+                for msg in crew_chat_messages
+                if isinstance(msg, dict) and "role" in msg and "content" in msg
+            )
+
+            print("conversation_history:", conversation_history)
+
+            # Add the instruction and conversation history to the description
+            self.description += (
+                f"\n\n{conversation_instruction}\n\n{conversation_history}"
+            )
+            print("UPDATED DESCRIPTION:", self.description)
 
     def interpolate_only(self, input_string: str, inputs: Dict[str, Any]) -> str:
         """Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched."""
         escaped_string = input_string.replace("{", "{{").replace("}", "}}")
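
To make the new block concrete, a runnable excerpt of its parse-and-join step, with the message values illustrative:

    import json

    crew_chat_messages = json.loads(
        '[{"role": "user", "content": "Focus on pricing"},'
        ' {"role": "assistant", "content": "Understood."}]'
    )
    conversation_history = "\n".join(
        f"{msg['role'].capitalize()}: {msg['content']}"
        for msg in crew_chat_messages
        if isinstance(msg, dict) and "role" in msg and "content" in msg
    )
    print(conversation_history)
    # User: Focus on pricing
    # Assistant: Understood.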
@@ -496,10 +531,10 @@ class Task(BaseModel):
     def _save_file(self, result: Any) -> None:
         """Save task output to a file.
 
         Args:
             result: The result to save to the file. Can be a dict or any stringifiable object.
 
         Raises:
             ValueError: If output_file is not set
             RuntimeError: If there is an error writing to the file
@@ -517,6 +552,7 @@ class Task(BaseModel):
         with resolved_path.open("w", encoding="utf-8") as file:
             if isinstance(result, dict):
                 import json
+
                 json.dump(result, file, ensure_ascii=False, indent=2)
             else:
                 file.write(str(result))
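
A self-contained sketch of the dict-vs-string save behavior documented above; the standalone function name and the path resolution are assumptions, since the surrounding setup and error handling are truncated in this hunk:

    import json
    from pathlib import Path
    from typing import Any

    def save_result(result: Any, output_file: str) -> None:
        resolved_path = Path(output_file).expanduser().resolve()
        resolved_path.parent.mkdir(parents=True, exist_ok=True)
        with resolved_path.open("w", encoding="utf-8") as file:
            if isinstance(result, dict):
                # Dicts are written as pretty-printed UTF-8 JSON.
                json.dump(result, file, ensure_ascii=False, indent=2)
            else:
                # Anything else is stringified verbatim.
                file.write(str(result))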

View File

@@ -23,7 +23,8 @@
"summary": "This is a summary of our conversation so far:\n{merged_summary}",
"manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
"formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
"human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\""
"human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\"",
"conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals."
},
"errors": {
"force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",